diff --git a/Makefile b/Makefile
index 87ad0b69d..f25087c5c 100644
--- a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ else
endif
endif

-RESTIC_VER := 0.17.3
+RESTIC_VER := 0.18.1

###
### These variables should not need tweaking.
diff --git a/go.mod b/go.mod
index ecaed1a50..b0e78f36e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,22 +1,22 @@
module stash.appscode.dev/stash

-go 1.24.0
+go 1.25

require (
- github.com/go-sql-driver/mysql v1.8.1
+ github.com/go-sql-driver/mysql v1.9.3
github.com/gogo/protobuf v1.3.2
- github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0
- github.com/onsi/ginkgo/v2 v2.17.2
- github.com/onsi/gomega v1.33.1
+ github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0
+ github.com/onsi/ginkgo/v2 v2.22.1
+ github.com/onsi/gomega v1.36.2
github.com/pkg/errors v0.9.1
- github.com/spf13/cobra v1.8.0
- github.com/spf13/pflag v1.0.5
- go.bytebuilders.dev/audit v0.0.41
- go.bytebuilders.dev/license-proxyserver v0.0.20
- go.bytebuilders.dev/license-verifier v0.14.6
- go.bytebuilders.dev/license-verifier/kubernetes v0.14.6
- golang.org/x/text v0.31.0
- gomodules.xyz/blobfs v0.1.14
+ github.com/spf13/cobra v1.10.1
+ github.com/spf13/pflag v1.0.9
+ go.bytebuilders.dev/audit v0.0.46
+ go.bytebuilders.dev/license-proxyserver v0.0.24
+ go.bytebuilders.dev/license-verifier v0.14.10
+ go.bytebuilders.dev/license-verifier/kubernetes v0.14.10
+ golang.org/x/text v0.32.0
+ gomodules.xyz/blobfs v0.2.2
gomodules.xyz/cert v1.6.0
gomodules.xyz/encoding v0.0.8
gomodules.xyz/envsubst v0.2.0
@@ -27,218 +27,255 @@ require (
gomodules.xyz/runtime v0.3.0
gomodules.xyz/stow v0.2.4
gomodules.xyz/x v0.0.17
- k8s.io/api v0.30.2
- k8s.io/apiextensions-apiserver v0.30.2
- k8s.io/apimachinery v0.30.2
- k8s.io/apiserver v0.30.2
- k8s.io/client-go v0.30.2
+ k8s.io/api v0.34.3
+ k8s.io/apiextensions-apiserver v0.34.3
+ k8s.io/apimachinery v0.34.3
+ k8s.io/apiserver v0.34.3
+ k8s.io/client-go v0.34.3
+ k8s.io/component-base v0.34.3
k8s.io/klog/v2 v2.130.1
- k8s.io/kube-aggregator v0.30.2
- k8s.io/kubernetes v1.30.2
- kmodules.xyz/client-go v0.30.44
- kmodules.xyz/constants v0.0.0-20230304030334-d2d1f28732a5
- kmodules.xyz/csi-utils v0.29.1
- kmodules.xyz/custom-resources v0.30.0
- kmodules.xyz/objectstore-api v0.29.1
- kmodules.xyz/offshoot-api v0.30.1
+ k8s.io/kube-aggregator v0.34.3
+ k8s.io/kubernetes v1.34.3
+ kmodules.xyz/client-go v0.34.2
+ kmodules.xyz/constants v0.0.0-20250815043538-9de88de78858
+ kmodules.xyz/csi-utils v0.34.0
+ kmodules.xyz/custom-resources v0.34.0
+ kmodules.xyz/objectstore-api v0.34.0
+ kmodules.xyz/offshoot-api v0.34.0
kmodules.xyz/openshift v0.29.0
- kmodules.xyz/prober v0.29.0
- kmodules.xyz/webhook-runtime v0.29.1
- sigs.k8s.io/controller-runtime v0.18.4
- stash.appscode.dev/apimachinery v0.42.1-0.20251212062910-cc7cccc43100
+ kmodules.xyz/prober v0.34.0
+ kmodules.xyz/webhook-runtime v0.34.0
+ sigs.k8s.io/controller-runtime v0.22.4
+ stash.appscode.dev/apimachinery v0.42.2-0.20251230090158-1034b727fe48
)

require (
- cloud.google.com/go/compute/metadata v0.3.0 // indirect
+ cel.dev/expr v0.24.0 // indirect
+ cloud.google.com/go/auth v0.15.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+ cloud.google.com/go/compute/metadata v0.7.0 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
- github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
- github.com/Azure/go-autorest/autorest v0.11.29 // indirect
- github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
- github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
- github.com/Azure/go-autorest/logger v0.2.1 // indirect
- github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/Azure/go-autorest/autorest v0.11.30 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect
+ github.com/Azure/go-autorest/autorest/date v0.3.1 // indirect
+ github.com/Azure/go-autorest/logger v0.2.2 // indirect
+ github.com/Azure/go-autorest/tracing v0.6.1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
- github.com/Masterminds/semver/v3 v3.3.0 // indirect
- github.com/Masterminds/sprig/v3 v3.2.3 // indirect
+ github.com/Masterminds/semver/v3 v3.3.1 // indirect
+ github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.2.1 // indirect
- github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
- github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
- github.com/aws/aws-sdk-go v1.45.7 // indirect
+ github.com/aws/aws-sdk-go v1.55.6 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.39.6 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.31.17 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.18.21 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
+ github.com/aws/smithy-go v1.23.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudevents/sdk-go/v2 v2.15.2 // indirect
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect
- github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
- github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
+ github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
- github.com/distribution/reference v0.5.0 // indirect
- github.com/docker/cli v24.0.9+incompatible // indirect
- github.com/docker/distribution v2.8.2+incompatible // indirect
- github.com/docker/docker v28.0.0+incompatible // indirect
- github.com/docker/docker-credential-helpers v0.7.0 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/cli v29.0.3+incompatible // indirect
+ github.com/docker/distribution v2.8.3+incompatible // indirect
+ github.com/docker/docker-credential-helpers v0.9.4 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch v5.9.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.13.0 // indirect
+ github.com/evanphx/json-patch v5.9.11+incompatible // indirect
+ github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/fatih/structs v1.1.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
- github.com/gabriel-vasile/mimetype v1.4.3 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.11 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.21.0 // indirect
- github.com/go-openapi/jsonreference v0.21.0 // indirect
- github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-openapi/jsonpointer v0.22.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.2 // indirect
+ github.com/go-openapi/swag v0.25.1 // indirect
+ github.com/go-openapi/swag/cmdutils v0.25.1 // indirect
+ github.com/go-openapi/swag/conv v0.25.1 // indirect
+ github.com/go-openapi/swag/fileutils v0.25.1 // indirect
+ github.com/go-openapi/swag/jsonname v0.25.1 // indirect
+ github.com/go-openapi/swag/jsonutils v0.25.1 // indirect
+ github.com/go-openapi/swag/loading v0.25.1 // indirect
+ github.com/go-openapi/swag/mangling v0.25.1 // indirect
+ github.com/go-openapi/swag/netutils v0.25.1 // indirect
+ github.com/go-openapi/swag/stringutils v0.25.1 // indirect
+ github.com/go-openapi/swag/typeutils v0.25.1 // indirect
+ github.com/go-openapi/swag/yamlutils v0.25.1 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
- github.com/google/btree v1.1.2 // indirect
- github.com/google/cel-go v0.17.8 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/go-containerregistry v0.19.1 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/cel-go v0.26.0 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/google/go-containerregistry v0.20.7 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
- github.com/google/s2a-go v0.1.7 // indirect
- github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.0 // indirect
- github.com/gorilla/websocket v1.5.1 // indirect
+ github.com/google/wire v0.6.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
+ github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect
- github.com/jonboulle/clockwork v0.4.0 // indirect
- github.com/josharian/intern v1.0.0 // indirect
+ github.com/jonboulle/clockwork v0.5.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.0.9 // indirect
+ github.com/klauspost/compress v1.18.1 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.5 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
- github.com/moby/spdystream v0.2.0 // indirect
+ github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
- github.com/nats-io/nats.go v1.39.0 // indirect
- github.com/nats-io/nkeys v0.4.9 // indirect
+ github.com/nats-io/nats.go v1.47.0 // indirect
+ github.com/nats-io/nkeys v0.4.11 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/ncw/swift v1.0.49 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
+ github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
- github.com/prometheus/client_golang v1.18.0 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.46.0 // indirect
- github.com/prometheus/procfs v0.15.0 // indirect
- github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 // indirect
- github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe // indirect
- github.com/rancher/wrangler/v3 v3.0.0 // indirect
+ github.com/prometheus/common v0.62.0 // indirect
+ github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/rancher/norman v0.5.2 // indirect
+ github.com/rancher/rancher/pkg/client v0.0.0-20250220153925-3abb578f42fe // indirect
+ github.com/rancher/wrangler/v3 v3.2.0-rc.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/sergi/go-diff v1.2.0 // indirect
+ github.com/sergi/go-diff v1.3.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.7.0 // indirect
- github.com/stoewer/go-strcase v1.2.0 // indirect
- github.com/vbatts/tar-split v0.11.3 // indirect
+ github.com/stoewer/go-strcase v1.3.0 // indirect
+ github.com/vbatts/tar-split v0.12.2 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yudai/gojsondiff v1.0.0 // indirect
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
- go.etcd.io/etcd/api/v3 v3.5.10 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect
- go.etcd.io/etcd/client/v3 v3.5.10 // indirect
+ go.etcd.io/etcd/api/v3 v3.6.4 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.6.4 // indirect
+ go.etcd.io/etcd/client/v3 v3.6.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
- go.opentelemetry.io/otel v1.24.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/sdk v1.22.0 // indirect
- go.opentelemetry.io/otel/trace v1.24.0 // indirect
- go.opentelemetry.io/proto/otlp v1.0.0 // indirect
- go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+ go.opentelemetry.io/otel v1.36.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect
+ go.opentelemetry.io/otel/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.36.0 // indirect
+ go.opentelemetry.io/otel/trace v1.36.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
- gocloud.dev v0.26.0 // indirect
- golang.org/x/crypto v0.45.0 // indirect
- golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ gocloud.dev v0.41.0 // indirect
+ golang.org/x/crypto v0.46.0 // indirect
+ golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.47.0 // indirect
- golang.org/x/oauth2 v0.27.0 // indirect
- golang.org/x/sync v0.18.0 // indirect
- golang.org/x/sys v0.38.0 // indirect
- golang.org/x/term v0.37.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.38.0 // indirect
- golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+ golang.org/x/oauth2 v0.33.0 // indirect
+ golang.org/x/sync v0.19.0 // indirect
+ golang.org/x/sys v0.39.0 // indirect
+ golang.org/x/term v0.38.0 // indirect
+ golang.org/x/time v0.13.0 // indirect
+ golang.org/x/tools v0.39.0 // indirect
+ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f // indirect
gomodules.xyz/counter v0.0.1 // indirect
- gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
+ gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
gomodules.xyz/jsonpath v0.0.2 // indirect
gomodules.xyz/mergo v0.3.13 // indirect
gomodules.xyz/password-generator v0.2.9 // indirect
gomodules.xyz/sets v0.2.1 // indirect
gomodules.xyz/sync v0.1.0 // indirect
gomodules.xyz/wait v0.2.0 // indirect
- google.golang.org/api v0.155.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/grpc v1.62.1 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ google.golang.org/api v0.228.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect
+ google.golang.org/grpc v1.72.1 // indirect
+ google.golang.org/protobuf v1.36.10 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gotest.tools/v3 v3.5.0 // indirect
- k8s.io/cli-runtime v0.30.2 // indirect
- k8s.io/component-base v0.30.2 // indirect
- k8s.io/kms v0.30.2 // indirect
- k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 // indirect
- k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
+ k8s.io/cli-runtime v0.34.3 // indirect
+ k8s.io/component-helpers v0.34.3 // indirect
+ k8s.io/controller-manager v0.34.3 // indirect
+ k8s.io/kms v0.34.3 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
kmodules.xyz/apiversion v0.2.0 // indirect
- kmodules.xyz/go-containerregistry v0.0.12 // indirect
- kmodules.xyz/resource-metadata v0.24.3 // indirect
- kmodules.xyz/resource-metrics v0.30.5 // indirect
+ kmodules.xyz/go-containerregistry v0.0.15 // indirect
+ kmodules.xyz/resource-metadata v0.40.2 // indirect
+ kmodules.xyz/resource-metrics v0.34.0 // indirect
moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7 // indirect
- sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
- sigs.k8s.io/cli-utils v0.35.0 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
- sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
- sigs.k8s.io/yaml v1.4.0 // indirect
- x-helm.dev/apimachinery v0.0.16 // indirect
+ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
+ sigs.k8s.io/cli-utils v0.37.2 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
+ sigs.k8s.io/kustomize/api v0.20.1 // indirect
+ sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+ sigs.k8s.io/yaml v1.6.0 // indirect
+ x-helm.dev/apimachinery v0.0.18 // indirect
)

replace github.com/Masterminds/sprig/v3 => github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8

-replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe
+replace sigs.k8s.io/controller-runtime => github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd

replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.6

-replace k8s.io/apiserver => github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b
-
-replace k8s.io/kubernetes => github.com/kmodules/kubernetes v1.31.0-alpha.0.0.20240827232825-183ac6f06fdd
+replace k8s.io/apiserver => github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc
diff --git a/go.sum b/go.sum
index fe4e951fb..387692ce1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -5,141 +7,74 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
-cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
-cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
-cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
-cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA=
-cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
-cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
-cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
-cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
-cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
-cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
-cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
-cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
-cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
+cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=
+cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=
+cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
+cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
-cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw=
-cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
-cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
-cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
-cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
-cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
-cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
-cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw=
-cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
-cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
-cloud.google.com/go/kms v1.1.0/go.mod h1:WdbppnCDMDpOvoYBMn1+gNmOeEoZYqAv+HeuKARGCXI=
-cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
-cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4=
-cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4=
+cloud.google.com/go/iam v1.4.2 h1:4AckGYAYsowXeHzsn/LCKWIwSWLkdb0eGjH8wWkd27Q=
+cloud.google.com/go/iam v1.4.2/go.mod h1:REGlrt8vSlh4dfCJfSEcNjLGq75wW75c5aU3FLOYq34=
+cloud.google.com/go/monitoring v1.24.1 h1:vKiypZVFD/5a3BbQMvI4gZdl8445ITzXFh257XBgrS0=
+cloud.google.com/go/monitoring v1.24.1/go.mod h1:Z05d1/vn9NaujqY2voG6pVQXoJGbp+r3laV+LySt9K0=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs=
-cloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA=
-cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
-cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
-cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A=
-cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM=
-contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
-contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8=
-contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
+cloud.google.com/go/storage v1.51.0 h1:ZVZ11zCiD7b3k+cH5lQs/qcNaoSz3U9I0jgwVzqDlCw=
+cloud.google.com/go/storage v1.51.0/go.mod h1:YEJfu/Ki3i5oHC/7jyTgsGZwdQ8P9hqMqvpi5kRKGgc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/Azure/azure-amqp-common-go/v3 v3.2.1/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI=
-github.com/Azure/azure-amqp-common-go/v3 v3.2.2/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI=
-github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
-github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
-github.com/Azure/azure-service-bus-go v0.11.5/go.mod h1:MI6ge2CuQWBVq+ly456MY7XqNLJip5LO1iSFodbNLbU=
-github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
-github.com/Azure/go-amqp v0.16.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=
-github.com/Azure/go-amqp v0.16.4/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
-github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
-github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE=
+github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
-github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
-github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
+github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4=
+github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w=
+github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
-github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc=
+github.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o=
+github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0=
+github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
-github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4=
+github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -147,9 +82,8 @@ github.com/PuerkitoBio/purell v1.2.1 h1:QsZ4TjvwiMpat6gBCBxEQI0rcS9ehtkKtSpiUnd9
github.com/PuerkitoBio/purell v1.2.1/go.mod h1:ZwHcC/82TOaovDi//J/804umJFFmbOHPngi8iYYv/Eo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
-github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -157,67 +91,47 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
-github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.20.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
-github.com/aws/aws-sdk-go v1.45.7 h1:k4QsvWZhm8409TYeRuTV1P6+j3lLKoe+giFA/j3VAps=
-github.com/aws/aws-sdk-go v1.45.7/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
-github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY=
-github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM=
-github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg=
-github.com/aws/aws-sdk-go-v2/config v1.18.25 h1:JuYyZcnMPBiFqn87L2cRppo+rNwgah6YwD3VuyvaW6Q=
-github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4=
-github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.24 h1:PjiYyls3QdCrzqUN35jMWtUK1vqVZ+zLfdOa/UPFDp0=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 h1:jJPgroehGvjrde3XufFIJUZVK5A2L9a3KwSFgKy9n8w=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3 h1:ir7iEq78s4txFGgwcLqD6q9IIPzTQNRJXulJd9h/zQo=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 h1:kG5eQilShqmJbv11XL1VpyDbaEJzWxd4zRiCG30GSn4=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 h1:vFQlirhuM8lLlpI7imKOMsjdQLuN9CPi+k44F/OFVsk=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34 h1:gGLG7yKaXG02/jBlg210R7VgQIotiQntNhsCFejawx8=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1 h1:T4pFel53bkHjL2mMo+4DKE6r6AuoZnM0fg7k1/ratr4=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3 h1:I0dcwWitE752hVSMrsLCxqNQ+UdEp3nACx2bYNMQq+k=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3/go.mod h1:Seb8KNmD6kVTjwRjVEgOT5hPin6sq+v4C2ycJQDwuH8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 h1:0iKliEXAcCa2qVtRs7Ot5hItA2MsufrphbRFlz1Owxo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3 h1:BKjwCJPnANbkwQ8vzSbaZDKawwagDubrH/z/c0X+kbQ=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc=
-github.com/aws/aws-sdk-go-v2/service/kms v1.16.3/go.mod h1:QuiHPBqlOFCi4LqdSskYYAWpQlx3PKmohy+rE2F+o5g=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.26.3 h1:rMPtwA7zzkSQZhhz9U3/SoIDz/NZ7Q+iRn4EIO8rSyU=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.26.3/go.mod h1:g1qvDuRsJY+XghsV6zg00Z4KJ7DtFFCx8fJD2a491Ak=
-github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.15.4/go.mod h1:PJc8s+lxyU8rrre0/4a0pn2wgwiDvOEzoOjcJUBr67o=
-github.com/aws/aws-sdk-go-v2/service/sns v1.17.4/go.mod h1:kElt+uCcXxcqFyc+bQqZPFD9DME/eC6oHBXvFzQ9Bcw=
-github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmrPc1HWCbHUJyrCEp+ZaLzqM=
-github.com/aws/aws-sdk-go-v2/service/ssm v1.24.1/go.mod h1:NR/xoKjdbRJ+qx0pMR4mI+N/H1I1ynHwXnO6FowXJc0=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU=
-github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 h1:UBQjaMTCKwyUYwiVnUt6toEJwGXsLBI6al083tpjJzY=
-github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 h1:PkHIIJs8qvq0e5QybnZoG1K/9QTrLr9OsqCIo59jOBA=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8=
-github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE9iTYD0gFmXVax9E=
-github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
-github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
-github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
-github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk=
+github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
+github.com/aws/aws-sdk-go-v2/config v1.31.17 h1:QFl8lL6RgakNK86vusim14P2k8BFSxjvUkcWLDjgz9Y=
+github.com/aws/aws-sdk-go-v2/config v1.31.17/go.mod h1:V8P7ILjp/Uef/aX8TjGk6OHZN6IKPM5YW6S78QnRD5c=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.21 h1:56HGpsgnmD+2/KpG0ikvvR8+3v3COCwaF4r+oWwOeNA=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.21/go.mod h1:3YELwedmQbw7cXNaII2Wywd+YY58AmLPwX4LzARgmmA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13 h1:T1brd5dR3/fzNFAQch/iBKeX07/ffu/cLu+q+RuzEWk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.13/go.mod h1:Peg/GBAQ6JDt+RoBf4meB1wylmAipb7Kg2ZFakZTlwk=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 h1:6VFPH/Zi9xYFMJKPQOX5URYkQoXRWeJ7V/7Y6ZDYoms=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3 h1:x2Ibm/Af8Fi+BH+Hsn9TXGdT+hKbDd5XOTZxTMxDk7o=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.3/go.mod h1:IW1jwyrQgMdhisceG8fQLmQIydcT/jWY21rFhzgaKwo=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13 h1:kDqdFvMY4AtKoACfzIGD8A0+hbT41KTKF//gq7jITfM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.13/go.mod h1:lmKuogqSU3HzQCwZ9ZtcqOc5XGMqtDK7OIc2+DxiUEg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 h1:0JPwLz1J+5lEOfy/g0SURC9cxhbQ1lIMHMa+AHZSzz0=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.1/go.mod h1:fKvyjJcz63iL/ftA6RaM8sRCtN4r4zl4tjL3qw5ec7k=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 h1:OWs0/j2UYR5LOGi88sD5/lhN6TDLG6SfA7CqsQO9zF0=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5/go.mod h1:klO+ejMvYsB4QATfEOIXk8WAEwN4N0aBfJpvC+5SZBo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 h1:mLlUgHn02ue8whiR4BmxxGJLR2gwU6s6ZzJ5wDamBUs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.39.1/go.mod h1:E19xDjpzPZC7LS2knI9E6BaRFDK43Eul7vd6rSq2HWk=
+github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM=
+github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -226,159 +140,143 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc=
github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
-github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
-github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU=
-github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
-github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
-github.com/docker/cli v24.0.9+incompatible h1:OxbimnP/z+qVjDLpq9wbeFU3Nc30XhSe+LkwYQisD50=
-github.com/docker/cli v24.0.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM=
-github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
-github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
+github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
+github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI=
+github.com/docker/docker-credential-helpers v0.9.4/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
+github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
-github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
-github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
-github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
+github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
-github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
+github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
-github.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
+github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
-github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
-github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
-github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-openapi/jsonpointer v0.22.1 h1:sHYI1He3b9NqJ4wXLoJDKmUmHkWy/L7rtEo92JUxBNk=
+github.com/go-openapi/jsonpointer v0.22.1/go.mod h1:pQT9OsLkfz1yWoMgYFy4x3U5GY5nUlsOn1qSBH5MkCM=
+github.com/go-openapi/jsonreference v0.21.2 h1:Wxjda4M/BBQllegefXrY/9aq1fxBA8sI5M/lFU6tSWU=
+github.com/go-openapi/jsonreference v0.21.2/go.mod h1:pp3PEjIsJ9CZDGCNOyXIQxsNuroxm8FAJ/+quA0yKzQ=
+github.com/go-openapi/swag v0.25.1 h1:6uwVsx+/OuvFVPqfQmOOPsqTcm5/GkBhNwLqIR916n8=
+github.com/go-openapi/swag v0.25.1/go.mod h1:bzONdGlT0fkStgGPd3bhZf1MnuPkf2YAys6h+jZipOo=
+github.com/go-openapi/swag/cmdutils v0.25.1 h1:nDke3nAFDArAa631aitksFGj2omusks88GF1VwdYqPY=
+github.com/go-openapi/swag/cmdutils v0.25.1/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
+github.com/go-openapi/swag/conv v0.25.1 h1:+9o8YUg6QuqqBM5X6rYL/p1dpWeZRhoIt9x7CCP+he0=
+github.com/go-openapi/swag/conv v0.25.1/go.mod h1:Z1mFEGPfyIKPu0806khI3zF+/EUXde+fdeksUl2NiDs=
+github.com/go-openapi/swag/fileutils v0.25.1 h1:rSRXapjQequt7kqalKXdcpIegIShhTPXx7yw0kek2uU=
+github.com/go-openapi/swag/fileutils v0.25.1/go.mod h1:+NXtt5xNZZqmpIpjqcujqojGFek9/w55b3ecmOdtg8M=
+github.com/go-openapi/swag/jsonname v0.25.1 h1:Sgx+qbwa4ej6AomWC6pEfXrA6uP2RkaNjA9BR8a1RJU=
+github.com/go-openapi/swag/jsonname v0.25.1/go.mod h1:71Tekow6UOLBD3wS7XhdT98g5J5GR13NOTQ9/6Q11Zo=
+github.com/go-openapi/swag/jsonutils v0.25.1 h1:AihLHaD0brrkJoMqEZOBNzTLnk81Kg9cWr+SPtxtgl8=
+github.com/go-openapi/swag/jsonutils v0.25.1/go.mod h1:JpEkAjxQXpiaHmRO04N1zE4qbUEg3b7Udll7AMGTNOo=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1 h1:DSQGcdB6G0N9c/KhtpYc71PzzGEIc/fZ1no35x4/XBY=
+github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.1/go.mod h1:kjmweouyPwRUEYMSrbAidoLMGeJ5p6zdHi9BgZiqmsg=
+github.com/go-openapi/swag/loading v0.25.1 h1:6OruqzjWoJyanZOim58iG2vj934TysYVptyaoXS24kw=
+github.com/go-openapi/swag/loading v0.25.1/go.mod h1:xoIe2EG32NOYYbqxvXgPzne989bWvSNoWoyQVWEZicc=
+github.com/go-openapi/swag/mangling v0.25.1 h1:XzILnLzhZPZNtmxKaz/2xIGPQsBsvmCjrJOWGNz/ync=
+github.com/go-openapi/swag/mangling v0.25.1/go.mod h1:CdiMQ6pnfAgyQGSOIYnZkXvqhnnwOn997uXZMAd/7mQ=
+github.com/go-openapi/swag/netutils v0.25.1 h1:2wFLYahe40tDUHfKT1GRC4rfa5T1B4GWZ+msEFA4Fl4=
+github.com/go-openapi/swag/netutils v0.25.1/go.mod h1:CAkkvqnUJX8NV96tNhEQvKz8SQo2KF0f7LleiJwIeRE=
+github.com/go-openapi/swag/stringutils v0.25.1 h1:Xasqgjvk30eUe8VKdmyzKtjkVjeiXx1Iz0zDfMNpPbw=
+github.com/go-openapi/swag/stringutils v0.25.1/go.mod h1:JLdSAq5169HaiDUbTvArA2yQxmgn4D6h4A+4HqVvAYg=
+github.com/go-openapi/swag/typeutils v0.25.1 h1:rD/9HsEQieewNt6/k+JBwkxuAHktFtH3I3ysiFZqukA=
+github.com/go-openapi/swag/typeutils v0.25.1/go.mod h1:9McMC/oCdS4BKwk2shEB7x17P6HmMmA6dQRtAkSnNb8=
+github.com/go-openapi/swag/yamlutils v0.25.1 h1:mry5ez8joJwzvMbaTGLhw8pXUnhDK91oSJLDPF1bmGk=
+github.com/go-openapi/swag/yamlutils v0.25.1/go.mod h1:cm9ywbzncy3y6uPm/97ysW8+wZ09qsks+9RS8fLWKqg=
+github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
+github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
-github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -386,149 +284,100 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
-github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8 h1:rWzwdmHqkXrISyacSZcK9oLZIu5nIxesPwp9f8LpTvc=
github.com/gomodules/sprig/v3 v3.2.3-0.20220405051441-0a8a99bac1b8/go.mod h1:70huEoC6heWUvVNiFAnIRaEmvzAECK551RuYBCkT13w=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
-github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
+github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
-github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
-github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
-github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
+github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo=
+github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI=
+github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
+github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
+github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
-github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/readahead v0.0.0-20161222183148-eaceba169032/go.mod h1:qYysrqQXuV4tzsizt4oOQ6mrBZQ0xnQXP3ylXX8Jk5Y=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=
-github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
+github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
-github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
-github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 h1:qnpSQwGEnkcRpTqNOIR6bJbR0gAorgP9CSALpRcKoAA=
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1/go.mod h1:lXGCsh6c22WGtjr+qGHj1otzZpV/1kwTMAqkwZsnWRU=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0 h1:FbSCl+KggFl+Ocym490i/EyXF4lPgLoUtcSWquBM0Rs=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.0/go.mod h1:qOchhhIlmRcqk/O9uCo/puJlyo07YINaIqdZfZG3Jkc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
-github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -551,123 +400,56 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
-github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
-github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
-github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
-github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
-github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
-github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
-github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
-github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
-github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
-github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
-github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
-github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
-github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
-github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
-github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
-github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw=
-github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
-github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
+github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
-github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b h1:pJA5vV5MDtLX3KP7tGuT05QTquydQyxNAy0R75chrDY=
-github.com/kmodules/apiserver v0.30.3-0.20240717062442-8d4dcc0bdd0b/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8=
-github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe h1:6nl5dIci8FTzM2hxZ89ufxTXUYqLr9kSGEPPwX87ryk=
-github.com/kmodules/controller-runtime v0.18.4-0.20240603164526-fa88ec2314fe/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg=
-github.com/kmodules/kubernetes v1.31.0-alpha.0.0.20240827232825-183ac6f06fdd h1:tyuju63lhMQkiG+1bxclnSSA+t8VCMMrRUPC8UgESCY=
-github.com/kmodules/kubernetes v1.31.0-alpha.0.0.20240827232825-183ac6f06fdd/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
+github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
+github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc h1:R5bKc1c8Qu7z+7+O0xNWxIPjCYuaHUVZ+dSfeCZEd+c=
+github.com/kmodules/apiserver v0.34.4-0.20251227112449-07fa35efc6fc/go.mod h1:QPnnahMO5C2m3lm6fPW3+JmyQbvHZQ8uudAu/493P2w=
+github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd h1:cpLV7Pr+pSo3kDYY4HsLZfbdF1WPQuPTP+Jo3hyoWzw=
+github.com/kmodules/controller-runtime v0.22.5-0.20251227114913-f011264689cd/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0 h1:j3YK74myEQRxR/srciTpOrm221SAvz6J5OVWbyfeXFo=
-github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0/go.mod h1:FlyYFe32mPxKEPaRXKNxfX576d1AoCzstYDoOOnyMA4=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 h1:bMqrb3UHgHbP+PW9VwiejfDJU1R0PpXVZNMdeH8WYKI=
+github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -683,24 +465,21 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
+github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -708,10 +487,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nats-io/nats.go v1.39.0 h1:2/yg2JQjiYYKLwDuBzV0FbB2sIV+eFNkEevlRi4n9lI=
-github.com/nats-io/nats.go v1.39.0/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
-github.com/nats-io/nkeys v0.4.9 h1:qe9Faq2Gxwi6RZnZMXfmGMZkg3afLLOtrU+gDZJ35b0=
-github.com/nats-io/nkeys v0.4.9/go.mod h1:jcMqs+FLG+W5YO36OX6wFIFcmpdAns+w1Wm6D3I/evE=
+github.com/nats-io/nats.go v1.47.0 h1:YQdADw6J/UfGUd2Oy6tn4Hq6YHxCaJrVKayxxFqYrgM=
+github.com/nats-io/nats.go v1.47.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
+github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
+github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=
@@ -721,24 +500,25 @@ github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JX
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
-github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM=
+github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
-github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -746,8 +526,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/pquerna/ffjson v0.0.0-20190930134022-aa0246cd15f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
-github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -755,27 +535,23 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
-github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek=
-github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 h1:AlRMRs5mHJcdiK83KKJyFVeybPMZ7dOUzC0l3k9aUa8=
-github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9/go.mod h1:dyjfXBsNiroPWOdUZe7diUOUSLf6HQ/r2kEpwH/8zas=
-github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe h1:ZD+h5ylTFvzjMDb/DS0R+q3FuoGSB6IFgd8bqRIrnZY=
-github.com/rancher/rancher/pkg/client v0.0.0-20240710123941-93e332156bbe/go.mod h1:A+DTKG05BZs1mOoCIB6UpiKo7j0dC6kSz3mgYju9Q20=
-github.com/rancher/wrangler/v3 v3.0.0 h1:IHHCA+vrghJDPxjtLk4fmeSCFhNe9fFzLFj3m2B0YpA=
-github.com/rancher/wrangler/v3 v3.0.0/go.mod h1:Dfckuuq7MJk2JWVBDywRlZXMxEyPxHy4XqGrPEzu5Eg=
+github.com/rancher/norman v0.5.2 h1:rwUKZ0QeVKJEtznhRdNQUMJtKjSoLYbFuPQGXm6xTxw=
+github.com/rancher/norman v0.5.2/go.mod h1:lDO9ylAYBwch9FiYyuuWlYd7+IxgRgh0ioDJBweC7t4=
+github.com/rancher/rancher/pkg/client v0.0.0-20250220153925-3abb578f42fe h1:ckqJXsEOiyEolfnry5tJ6LDTNkeloYwdizGymrAIjTM=
+github.com/rancher/rancher/pkg/client v0.0.0-20250220153925-3abb578f42fe/go.mod h1:sA4Fa3EAKYMqxvLWdAVZHkjnahHq5zYFXVFNQZSTyPs=
+github.com/rancher/wrangler/v3 v3.2.0-rc.3 h1:MySHWLxLLrGrM2sq5YYp7Ol1kQqYt9lvIzjGR50UZ+c=
+github.com/rancher/wrangler/v3 v3.2.0-rc.3/go.mod h1:0C5QyvSrQOff8gQQzpB/L/FF03EQycjR3unSJcKCHno=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -783,17 +559,12 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
-github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
+github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -808,47 +579,48 @@ github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
+github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
-github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
+github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
@@ -857,131 +629,102 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3Ifn
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
-github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
-go.bytebuilders.dev/audit v0.0.41 h1:zjTOCqxi95ZBzw65+i9lT2+RVABAL4evMZTMJlfPzMk=
-go.bytebuilders.dev/audit v0.0.41/go.mod h1:7sv+kb79UJ49GgH83218Tq9jlJcFsDTOjRSHF+2my/U=
-go.bytebuilders.dev/license-proxyserver v0.0.20 h1:gzRSwUmX/LSwPVE6T9oy5RLIutU1EeI7hmS+QGsYBY4=
-go.bytebuilders.dev/license-proxyserver v0.0.20/go.mod h1:2PJmjMCXncVyeP3fIVQ+hwZnuhmWSTmbcuEMBrFKIac=
-go.bytebuilders.dev/license-verifier v0.14.6 h1:0iHYGURUbx8toiXvFKftn/qMpeHzqHbAgEnEzOCNLvo=
-go.bytebuilders.dev/license-verifier v0.14.6/go.mod h1:LqWXJKee5ofDcCYM6T5WilYlUc4NlKeZz58tHwO8GEs=
-go.bytebuilders.dev/license-verifier/kubernetes v0.14.6 h1:NxmASX0A3lu+ABd4zuT5Ib+I63y3j5uJxmlUFEGxqWg=
-go.bytebuilders.dev/license-verifier/kubernetes v0.14.6/go.mod h1:N5QxsJF4EGLduOsTsW9gGfRuuMvN33T8pg5Y9NfKzuo=
+go.bytebuilders.dev/audit v0.0.46 h1:NR/BUY1JWVk8zptwtOSuBjmzpKdGB8JV3tPsDNuSQf8=
+go.bytebuilders.dev/audit v0.0.46/go.mod h1:tyWb5h5R49qZGjixwwyHKGcoy24iug7tEHutRJEpiZI=
+go.bytebuilders.dev/license-proxyserver v0.0.24 h1:IXFrxXmPM9s/t9UB75caumgR0rl56GuIPJxv/ifFTd0=
+go.bytebuilders.dev/license-proxyserver v0.0.24/go.mod h1:MUwspWlc+ScUdJIjb9BEeS8g7GadJ4hgf+cByNW2o9w=
+go.bytebuilders.dev/license-verifier v0.14.10 h1:K4VZjaoDXQde8QtL2kzpgk0jHw3W5CxFK9vh78RbDbQ=
+go.bytebuilders.dev/license-verifier v0.14.10/go.mod h1:+cr+kft45r9BbsmZ9D5MGK9CrOf0VL3kBuOd/MiahdA=
+go.bytebuilders.dev/license-verifier/kubernetes v0.14.10 h1:5L7ICdR4kkOVy8rb0tMrTR3wC/yGvam9DIQvKHo8yk8=
+go.bytebuilders.dev/license-verifier/kubernetes v0.14.10/go.mod h1:DXxySMXnkwJuGtzkPkY2nf3D5tBS9H6qrlQaCIQ5VPU=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
-go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
-go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k=
-go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0=
-go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U=
-go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
-go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
-go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao=
-go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc=
-go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
-go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
-go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
-go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
-go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
-go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
-go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
+go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
+go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
+go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
+go.etcd.io/etcd/api/v3 v3.6.4/go.mod h1:eFhhvfR8Px1P6SEuLT600v+vrhdDTdcfMzmnxVXXSbk=
+go.etcd.io/etcd/client/pkg/v3 v3.6.4 h1:9HBYrjppeOfFjBjaMTRxT3R7xT0GLK8EJMVC4xg6ok0=
+go.etcd.io/etcd/client/pkg/v3 v3.6.4/go.mod h1:sbdzr2cl3HzVmxNw//PH7aLGVtY4QySjQFuaCgcRFAI=
+go.etcd.io/etcd/client/v3 v3.6.4 h1:YOMrCfMhRzY8NgtzUsHl8hC2EBSnuqbR3dh84Uryl7A=
+go.etcd.io/etcd/client/v3 v3.6.4/go.mod h1:jaNNHCyg2FdALyKWnd7hxZXZxZANb0+KGY+YQaEMISo=
+go.etcd.io/etcd/pkg/v3 v3.6.4 h1:fy8bmXIec1Q35/jRZ0KOes8vuFxbvdN0aAFqmEfJZWA=
+go.etcd.io/etcd/pkg/v3 v3.6.4/go.mod h1:kKcYWP8gHuBRcteyv6MXWSN0+bVMnfgqiHueIZnKMtE=
+go.etcd.io/etcd/server/v3 v3.6.4 h1:LsCA7CzjVt+8WGrdsnh6RhC0XqCsLkBly3ve5rTxMAU=
+go.etcd.io/etcd/server/v3 v3.6.4/go.mod h1:aYCL/h43yiONOv0QIR82kH/2xZ7m+IWYjzRmyQfnCAg=
+go.etcd.io/raft/v3 v3.6.0 h1:5NtvbDVYpnfZWcIHgGRk9DyzkBIXOi8j+DDp1IcnUWQ=
+go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0 h1:DeFD0VgTZ+Cj6hxravYYZE2W4GlneVH81iAOPjZkzk8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.20.0/go.mod h1:GijYcYmNpX1KazD5JmWGsi4P7dDTTTnfv1UbGn84MnU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0 h1:gvmNvqrPYovvyRmCSygkUDyL8lC5Tl845MLEwqpxhEU=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.20.0/go.mod h1:vNUq47TGFioo+ffTSnKNdob241vePmtNZnAODKapKd0=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
-go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA=
+go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
+go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-gocloud.dev v0.26.0 h1:4rM/SVL0lLs+rhC0Gmc+gt/82DBpb7nbpIZKXXnfMXg=
-gocloud.dev v0.26.0/go.mod h1:mkUgejbnbLotorqDyvedJO20XcZNTynmSeVSQS9btVg=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+gocloud.dev v0.41.0 h1:qBKd9jZkBKEghYbP/uThpomhedK5s2Gy6Lz7h/zYYrM=
+gocloud.dev v0.41.0/go.mod h1:IetpBcWLUwroOOxKr90lhsZ8vWxeSkuszBnW62sbcf0=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
-golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
+golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -991,24 +734,17 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1023,81 +759,36 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
-golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
+golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
-golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1105,108 +796,52 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
-golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
-golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
-golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1215,74 +850,34 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
-golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-gomodules.xyz/blobfs v0.1.14 h1:8Iq62ojIi1JNY5iMcebJj45D2oMVvphpJ/T0sCybwI8=
-gomodules.xyz/blobfs v0.1.14/go.mod h1:DIon2nI14qPceQ3ShZi6lAQGFjUzpeXUdjHnqwu6ryg=
+golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
+golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+gomodules.xyz/blobfs v0.2.2 h1:7Udq0t+px7jszXCod9sq1qNpfukrfTHGh7MzbPNRLuk=
+gomodules.xyz/blobfs v0.2.2/go.mod h1:8H68WZnLvsuHRM/BqKTtqupPDvNa7L1U4uXBEAsWMwo=
gomodules.xyz/cert v1.6.0 h1:IzeWRoCFAZrHozsMh94+05R6vtY9zSMB2eZ/NpE/ySI=
gomodules.xyz/cert v1.6.0/go.mod h1:RGLh9tSem4k43LTsS0wXwmvzJs09lDR19BTtCYhMsHE=
gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f h1:hTyhR4r+tj1Uq7/PpFxLTzbeA0LhMVp7bEYfhkzFjdY=
@@ -1297,8 +892,8 @@ gomodules.xyz/flags v0.1.3 h1:jQ06+EfmoMv5NvjXvJon03dOhLU+FF0TQMWN7I6qpzs=
gomodules.xyz/flags v0.1.3/go.mod h1:e+kvBLnqdEWGG670SKOYag1CXStM2Slrxq01OIK3tFs=
gomodules.xyz/go-sh v0.1.0 h1:1BJAuGREh2RhePt7HRrpmjnkbgfpXlCzc42SiyZ5dkc=
gomodules.xyz/go-sh v0.1.0/go.mod h1:N8IrjNiYppUI/rxENYrWD6FOrSxSyEZnIekPEWM7LP0=
-gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
-gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
+gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
gomodules.xyz/jsonpath v0.0.2 h1:taUvqxKQ9KqVl3vq/+hLg7rCZUIQjq+izhbvo6nTIkE=
gomodules.xyz/jsonpath v0.0.2/go.mod h1:du28vmLHrgEV48JqK/7rn92YHsVDoQuqrowb2w6YZmE=
gomodules.xyz/logs v0.0.7 h1:dkhpdQuzj+pOS3S7VaOq+JV7BVU7f68/k3uDYufhPow=
@@ -1330,52 +925,12 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
-google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
-google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
-google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
-google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
-google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8=
-google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80=
-google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
-google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
-google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
-google.golang.org/api v0.155.0 h1:vBmGhCYs0djJttDNynWo44zosHlPvHmA0XiN2zP2DtA=
-google.golang.org/api v0.155.0/go.mod h1:GI5qK5f40kCpHfPn6+YzGAByIKWv8ujFnmoWm7Igduk=
+google.golang.org/api v0.228.0 h1:X2DJ/uoWGnY5obVjewbp8icSL5U4FzuCfy9OjbLSnLs=
+google.golang.org/api v0.228.0/go.mod h1:wNvRS1Pbe8r4+IfBIniV8fwCpGwTrYa+kMUDiC5z5a4=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1385,116 +940,22 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
-google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto v0.0.0-20250324211829-b45e905df463 h1:qEFnJI6AnfZk0NNe8YTyXQh5i//Zxi4gBHwRgp76qpw=
+google.golang.org/genproto v0.0.0-20250324211829-b45e905df463/go.mod h1:SqIx1NV9hcvqdLHo7uNZDS5lrUJybQ3evo3+z/WBfA0=
+google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463 h1:hE3bRWtU6uceqlh4fhrSnUyjKHMKB9KrTLLG+bc0ddM=
+google.golang.org/genproto/googleapis/api v0.0.0-20250324211829-b45e905df463/go.mod h1:U90ffi8eUL9MwPcrJylN5+Mk2v3vuPDptd5yyNUiRR8=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA=
+google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1503,14 +964,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1518,7 +974,8 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -1531,13 +988,11 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1548,78 +1003,81 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE=
-k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/cli-runtime v0.30.2 h1:ooM40eEJusbgHNEqnHziN9ZpLN5U4WcQGsdLKVxpkKE=
-k8s.io/cli-runtime v0.30.2/go.mod h1:Y4g/2XezFyTATQUbvV5WaChoUGhojv/jZAtdp5Zkm0A=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
-k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII=
-k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE=
+k8s.io/api v0.34.3 h1:D12sTP257/jSH2vHV2EDYrb16bS7ULlHpdNdNhEw2S4=
+k8s.io/api v0.34.3/go.mod h1:PyVQBF886Q5RSQZOim7DybQjAbVs8g7gwJNhGtY5MBk=
+k8s.io/apiextensions-apiserver v0.34.3 h1:p10fGlkDY09eWKOTeUSioxwLukJnm+KuDZdrW71y40g=
+k8s.io/apiextensions-apiserver v0.34.3/go.mod h1:aujxvqGFRdb/cmXYfcRTeppN7S2XV/t7WMEc64zB5A0=
+k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
+k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/cli-runtime v0.34.3 h1:YRyMhiwX0dT9lmG0AtZDaeG33Nkxgt9OlCTZhRXj9SI=
+k8s.io/cli-runtime v0.34.3/go.mod h1:GVwL1L5uaGEgM7eGeKjaTG2j3u134JgG4dAI6jQKhMc=
+k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
+k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
+k8s.io/component-base v0.34.3 h1:zsEgw6ELqK0XncCQomgO9DpUIzlrYuZYA0Cgo+JWpVk=
+k8s.io/component-base v0.34.3/go.mod h1:5iIlD8wPfWE/xSHTRfbjuvUul2WZbI2nOUK65XL0E/c=
+k8s.io/component-helpers v0.34.3 h1:Iws1GQfM89Lxo7IZITGmVdFOW0Bmyd7SVwwIu1/CCkE=
+k8s.io/component-helpers v0.34.3/go.mod h1:S8HjjMTrUDVMVPo2EdNYRtQx9uIEIueQYdPMOe9UxJs=
+k8s.io/controller-manager v0.34.3 h1:pEW6ExR3FteKkYkKRrLoi0Sy8dcbvUTAReP8OTxK5k0=
+k8s.io/controller-manager v0.34.3/go.mod h1:YzXiwiubf6GdSC3ej2XFYhQQBwF5AvJq/3eymdsU9OU=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.30.2 h1:VSZILO/tkzrz5Tu2j+yFQZ2Dc5JerQZX2GqhFJbQrfw=
-k8s.io/kms v0.30.2/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
-k8s.io/kube-aggregator v0.30.2 h1:0+yk/ED6foCprY8VmkDPUhngjaAPKsNTXB/UrtvbIz0=
-k8s.io/kube-aggregator v0.30.2/go.mod h1:EhqCfDdxysNWXk1wRL9SEHAdo1DKl6EULQagztkBcXE=
-k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2 h1:T5TEV4a+pEjc+j9Xui3MGGeoDLIN6uzZrx8NYotFMgQ=
-k8s.io/kube-openapi v0.0.0-20240703190633-0aa61b46e8c2/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kms v0.34.3 h1:QzBOD0sk1bGQVMcZQAHGjtbP1iKZJUyhC6D0I+BTxIE=
+k8s.io/kms v0.34.3/go.mod h1:s1CFkLG7w9eaTYvctOxosx88fl4spqmixnNpys0JAtM=
+k8s.io/kube-aggregator v0.34.3 h1:rKsZWTD2As4dKuv+zzdJU0uo5H7bFlAEoSucai4mW6M=
+k8s.io/kube-aggregator v0.34.3/go.mod h1:d4D8PV2FK4Qlq6u442FSum1tHPhK9tKdKBfH/A3R0I0=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/kubernetes v1.34.3 h1:0TfljWbhEF5DBks+WFMSrvKfxBLo4vnZuqORjLMiyT4=
+k8s.io/kubernetes v1.34.3/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
kmodules.xyz/apiversion v0.2.0 h1:vAQYqZFm4xu4pbB1cAdHbFEPES6EQkcR4wc06xdTOWk=
kmodules.xyz/apiversion v0.2.0/go.mod h1:oPX8g8LvlPdPX3Yc5YvCzJHQnw3YF/X4/jdW0b1am80=
-kmodules.xyz/client-go v0.30.44 h1:mLOBXQhvCIhdega9WoN4Px/usqobuKTT2vOPQcbGhXQ=
-kmodules.xyz/client-go v0.30.44/go.mod h1:T9Kiu20wXEn65dLBQeegf4+y7oahJBR9ZJO2zGEVLIY=
-kmodules.xyz/constants v0.0.0-20230304030334-d2d1f28732a5 h1:wybpLbdInCO2QripCkVXFce37ESUWKHdWXHHFLXJchg=
-kmodules.xyz/constants v0.0.0-20230304030334-d2d1f28732a5/go.mod h1:3C5i73Z7fcMVyu5TXtPuizGD8vWAbesXFVp1ESbIa1k=
-kmodules.xyz/csi-utils v0.29.1 h1:O0WDb/rsqj+FwT3C/KshB9hhBTh84YCP3fVb1UGMuHw=
-kmodules.xyz/csi-utils v0.29.1/go.mod h1:tcUPTlt0NSlo0RdsHGvA5+LvXhC1WDloqVUkZrzN9MM=
-kmodules.xyz/custom-resources v0.30.0 h1:vR3CbseHMLwR4GvtcJJuRuwIV8voKqFqNii27rMcm1o=
-kmodules.xyz/custom-resources v0.30.0/go.mod h1:ZsTuI2mLG2s3byre7bHmpxJ9w0HDqAkRTL1+izGFI24=
-kmodules.xyz/go-containerregistry v0.0.12 h1:Tl32QGmSqRVm9PUEb/f3dgDeu9zW5fVzt3qmAFIE37I=
-kmodules.xyz/go-containerregistry v0.0.12/go.mod h1:KgeNg0hDsgeda+qc0NzWk0iVRdF0+ZIg/oRzGoYh78I=
-kmodules.xyz/objectstore-api v0.29.1 h1:uUsjf8KU0w4LYowSEOnl0AbHT3hsHIu1wNLHqGe1o6s=
-kmodules.xyz/objectstore-api v0.29.1/go.mod h1:xG+5awH1SXYKxwN/+k1FEQvzixd5tgNqEN/1LEiB2FE=
-kmodules.xyz/offshoot-api v0.30.1 h1:TrulAYO+oBsXe9sZZGTmNWIuI8qD2izMpgcTSPvgAmI=
-kmodules.xyz/offshoot-api v0.30.1/go.mod h1:T3mpjR6fui0QzOcmQvIuANytW48fe9ytmy/1cgx6D4g=
+kmodules.xyz/client-go v0.34.2 h1:2Cec+nyfj9kfbR+5KPK3AksxN6h4jSjhn/tw+Dhqggo=
+kmodules.xyz/client-go v0.34.2/go.mod h1:kQRuGMxhb+B9rVdcfBzjK+PV7oBDo+SaDiQ66u1QG+4=
+kmodules.xyz/constants v0.0.0-20250815043538-9de88de78858 h1:NP66IJ1q5eo3ETt+dbZ6lLFYp9Tbe0C3cMNda8zwadM=
+kmodules.xyz/constants v0.0.0-20250815043538-9de88de78858/go.mod h1:nokPwRw1HhtaJsWGsGznKfiQzsRvGmW4SLDLVq6AQ9I=
+kmodules.xyz/csi-utils v0.34.0 h1:qsYGTq9Ta8yWyTMHxaA10DS/g8rEbkyyeHX+Z5xY7Xk=
+kmodules.xyz/csi-utils v0.34.0/go.mod h1:WKDUxLqqLMkubR/Uk476c28/xzWckMCE1S6mQBmUegw=
+kmodules.xyz/custom-resources v0.34.0 h1:ljkIYzIq0A3Awj87kkpuYqS9aifuyR3Hr9q2OVKoojM=
+kmodules.xyz/custom-resources v0.34.0/go.mod h1:pcA/n/CnrycjKCRNtU9Z+l5svhzFncLY2Kn9pqeXDVs=
+kmodules.xyz/go-containerregistry v0.0.15 h1:PRY5FDOzb6u23KOulQ4SWNdeUkBKmezLyJXP88q4EPw=
+kmodules.xyz/go-containerregistry v0.0.15/go.mod h1:rO0DEbYYEu1BfVcZ1pXV+3RgzVXr/k5hXcO+BQYVVDI=
+kmodules.xyz/objectstore-api v0.34.0 h1:h16QGeKAJB27mj0rhvm3I0q0ulE99TgIulob/UkXzRc=
+kmodules.xyz/objectstore-api v0.34.0/go.mod h1:x6snmU8evi1K0qcWKrTXbkSN18hamcuGraYGCgtG9JY=
+kmodules.xyz/offshoot-api v0.34.0 h1:HnOOp8FrCjTWjtNApRDo6Ahe79tOlLrJmyye4xxO4Kk=
+kmodules.xyz/offshoot-api v0.34.0/go.mod h1:F+B59yYw4CZJ4uD4xu6C+mMLzIXUtuH7E+SbDICl9jE=
kmodules.xyz/openshift v0.29.0 h1:8PXjeQ+usUGkLnYUMSbZrMg+i1OIYBe9UWOeBf2FRzU=
kmodules.xyz/openshift v0.29.0/go.mod h1:neEinR+BcvP4vKKCkh9HpRK9e3c+upSst9E7rskZVuQ=
-kmodules.xyz/prober v0.29.0 h1:Ex7m4F9rH7uWNNJlLgP63ROOM+nUATJkC2L5OQ7nwMg=
-kmodules.xyz/prober v0.29.0/go.mod h1:UtK+HKyI1lFLEKX+HFLyOCVju6TO93zv3kwGpzqmKOo=
-kmodules.xyz/resource-metadata v0.24.3 h1:yGXm6G1YIXru3mgDqDWsCPhGfotT3xBTKy+jd0bpF+E=
-kmodules.xyz/resource-metadata v0.24.3/go.mod h1:rPUZSMR0e1Vi+gONQ2ZhOFW+GvUeK+1AI7h9fzTZoKI=
-kmodules.xyz/resource-metrics v0.30.5 h1:ZhpGeR9DCz1HTrKUg/mWhr95wlFzCPRdgVAqwaggy1o=
-kmodules.xyz/resource-metrics v0.30.5/go.mod h1:w9+rz7/s/kGP1GWzYSuRdCn+l7EwpesmESSEHkLBnIQ=
-kmodules.xyz/webhook-runtime v0.29.1 h1:SQ8NvwJpxv5CUuXIubSg9g0Bk8BBnj4dhCWEk6Ou8g8=
-kmodules.xyz/webhook-runtime v0.29.1/go.mod h1:WVgNO4NqFdYwYZ82L6QIycN1Ax47y+yXi7XIHekYylo=
+kmodules.xyz/prober v0.34.0 h1:ElZkZYCjLaytAA0M8EH42To7i9gh1IIX+d0qfaIohys=
+kmodules.xyz/prober v0.34.0/go.mod h1:rsu/fxxfNxY70GDbH6Ju8G66459hi7AhWSSBoiIp8ic=
+kmodules.xyz/resource-metadata v0.40.2 h1:2J+UvAaHXfqDStO2SKqeVkER4z/kSOqpj8Iyrc9+V4Y=
+kmodules.xyz/resource-metadata v0.40.2/go.mod h1:38+41aUSrWqrQDeaSITKoxAiGT0ysQk5yjRODqBadpw=
+kmodules.xyz/resource-metrics v0.34.0 h1:cqscgTx3PONxHj6PIySK3sTlKKv8iKTGzRd+S6YSwXg=
+kmodules.xyz/resource-metrics v0.34.0/go.mod h1:R34IKtp5+NqcQz7AQJheBJK6Iem0LqrCbm/55Mn+ECQ=
+kmodules.xyz/webhook-runtime v0.34.0 h1:EmETNISlE6C89H492+u6EGUB/5ub/vvC+l2fXpV2e2g=
+kmodules.xyz/webhook-runtime v0.34.0/go.mod h1:sBzU7/eqCcF788jzmiTFqR+v1VH0WSXuIWDRqNACNYg=
moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7 h1:NykkTlRB+X40z86cLHdEmuoTxhNKhQebLT379b1EumA=
moul.io/http2curl/v2 v2.3.1-0.20221024080105-10c404f653f7/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE=
-nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
-sigs.k8s.io/cli-utils v0.35.0 h1:dfSJaF1W0frW74PtjwiyoB4cwdRygbHnC7qe7HF0g/Y=
-sigs.k8s.io/cli-utils v0.35.0/go.mod h1:ITitykCJxP1vaj1Cew/FZEaVJ2YsTN9Q71m02jebkoE=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
-sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
-sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
-sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
-stash.appscode.dev/apimachinery v0.42.1-0.20251212062910-cc7cccc43100 h1:Y3Y70xEQc+/TtMEt8IW/VY7Iz1cNRfSc5+CcZGgKUK4=
-stash.appscode.dev/apimachinery v0.42.1-0.20251212062910-cc7cccc43100/go.mod h1:uxzHr2lfPtGuryxTwjvcS9H8A9WCMyUr6+0dQYJiTIA=
-x-helm.dev/apimachinery v0.0.16 h1:Eb160xcdH9fMVHak5QSWYWxoaReytch+A7kk25QWjx0=
-x-helm.dev/apimachinery v0.0.16/go.mod h1:05brgFw5oWOX7OTXT090SQojqXjbttqWfqoJo+ejBU4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/cli-utils v0.37.2 h1:GOfKw5RV2HDQZDJlru5KkfLO1tbxqMoyn1IYUxqBpNg=
+sigs.k8s.io/cli-utils v0.37.2/go.mod h1:V+IZZr4UoGj7gMJXklWBg6t5xbdThFBcpj4MrZuCYco=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
+sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
+sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
+sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
+stash.appscode.dev/apimachinery v0.42.2-0.20251230090158-1034b727fe48 h1:wBJaQUeY5OAhB6UUg1BTfuFm3E1IWtlNEfgA/ya8Vsc=
+stash.appscode.dev/apimachinery v0.42.2-0.20251230090158-1034b727fe48/go.mod h1:TMNL2PGWlxWOQtspAPZZfIWJTHDmJBffqiVJ4dq4mLg=
+x-helm.dev/apimachinery v0.0.18 h1:UHrC0PGjeSRSRECXRaVUl4fIIwwyQSnNFCCAevSI14w=
+x-helm.dev/apimachinery v0.0.18/go.mod h1:C+M2A9cVmNhqP6ZGft4opUm1cPiWLHukKV5kWNkzXZs=
diff --git a/pkg/backup/backupsession.go b/pkg/backup/backupsession.go
index 95afe1a7f..13145a8fd 100644
--- a/pkg/backup/backupsession.go
+++ b/pkg/backup/backupsession.go
@@ -70,7 +70,7 @@ type BackupSessionController struct {
InvokerName string
Namespace string
// Backup Session
- bsQueue *queue.Worker
+ bsQueue *queue.Worker[any]
bsInformer cache.SharedIndexInformer
bsLister v1beta1.BackupSessionLister
@@ -134,7 +134,7 @@ func (c *BackupSessionController) runBackupSessionController(invokerRef *core.Ob
func (c *BackupSessionController) initBackupSessionWatcher() error {
c.bsInformer = c.StashInformerFactory.Stash().V1beta1().BackupSessions().Informer()
- c.bsQueue = queue.New(api_v1beta1.ResourceKindBackupSession, c.MaxNumRequeues, c.NumThreads, c.processBackupSession)
+ c.bsQueue = queue.New[any](api_v1beta1.ResourceKindBackupSession, c.MaxNumRequeues, c.NumThreads, c.processBackupSession)
_, _ = c.bsInformer.AddEventHandler(queue.NewFilteredHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
if backupsession, ok := obj.(*api_v1beta1.BackupSession); ok && c.selectedByLabels(backupsession) {
@@ -164,7 +164,8 @@ func (c *BackupSessionController) initBackupSessionWatcher() error {
// syncToStdout is the business logic of the controller. In this controller it simply prints
// information about the deployment to stdout. In case an error happened, it has to simply return the error.
// The retry logic should not be part of the business logic.
-func (c *BackupSessionController) processBackupSession(key string) error {
+func (c *BackupSessionController) processBackupSession(v any) error {
+ key := v.(string)
obj, exists, err := c.bsInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.Errorf("Fetching object with key %s from store failed with %v", key, err)
diff --git a/pkg/cmds/create_volumesnapshot.go b/pkg/cmds/create_volumesnapshot.go
index 83db13fd3..23d565107 100644
--- a/pkg/cmds/create_volumesnapshot.go
+++ b/pkg/cmds/create_volumesnapshot.go
@@ -31,8 +31,8 @@ import (
"stash.appscode.dev/stash/pkg/status"
"stash.appscode.dev/stash/pkg/volumesnapshot"
- vsapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
- vscs "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned"
+ vsapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+ vscs "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
"github.com/spf13/cobra"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
diff --git a/pkg/cmds/restore_volumesnapshot.go b/pkg/cmds/restore_volumesnapshot.go
index b821de58f..b546757d5 100644
--- a/pkg/cmds/restore_volumesnapshot.go
+++ b/pkg/cmds/restore_volumesnapshot.go
@@ -31,7 +31,7 @@ import (
"stash.appscode.dev/stash/pkg/status"
"stash.appscode.dev/stash/pkg/util"
- vscs "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned"
+ vscs "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
"github.com/spf13/cobra"
"gomodules.xyz/pointer"
core "k8s.io/api/core/v1"
diff --git a/pkg/cmds/root.go b/pkg/cmds/root.go
index f27f7bfc2..9bb474fb2 100644
--- a/pkg/cmds/root.go
+++ b/pkg/cmds/root.go
@@ -45,8 +45,8 @@ func NewRootCmd() *cobra.Command {
}
rootCmd.AddCommand(v.NewCmdVersion())
- stopCh := genericapiserver.SetupSignalHandler()
- rootCmd.AddCommand(NewCmdRun(os.Stdout, os.Stderr, stopCh))
+ ctx := genericapiserver.SetupSignalContext()
+ rootCmd.AddCommand(NewCmdRun(ctx, os.Stdout, os.Stderr))
rootCmd.AddCommand(NewCmdSnapshots())
rootCmd.AddCommand(NewCmdForget())
diff --git a/pkg/cmds/run.go b/pkg/cmds/run.go
index fc61c8825..e64b2cb7a 100644
--- a/pkg/cmds/run.go
+++ b/pkg/cmds/run.go
@@ -17,6 +17,7 @@ limitations under the License.
package cmds
import (
+ "context"
"io"
"stash.appscode.dev/stash/pkg/cmds/server"
@@ -26,7 +27,7 @@ import (
"k8s.io/klog/v2"
)
-func NewCmdRun(out, errOut io.Writer, stopCh <-chan struct{}) *cobra.Command {
+func NewCmdRun(ctx context.Context, out, errOut io.Writer) *cobra.Command {
o := server.NewStashOptions(out, errOut)
cmd := &cobra.Command{
@@ -43,7 +44,7 @@ func NewCmdRun(out, errOut io.Writer, stopCh <-chan struct{}) *cobra.Command {
if err := o.Validate(args); err != nil {
return err
}
- if err := o.Run(stopCh); err != nil {
+ if err := o.Run(ctx); err != nil {
return err
}
return nil
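
Taken together, root.go and run.go switch the process lifecycle from a stop channel to a context: `genericapiserver.SetupSignalContext` replaces `SetupSignalHandler`, and the resulting context is threaded down to `o.Run`. A standard-library-only sketch of the same idea — `SetupSignalContext` wires SIGINT/SIGTERM roughly the way `signal.NotifyContext` does:

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
)

func run(ctx context.Context) error {
	// Anything that previously selected on stopCh now selects on ctx.Done().
	// Since ctx.Done() returns a <-chan struct{}, legacy channel-based APIs
	// keep working (as the start.go hunk below does for
	// VerifyLicensePeriodically).
	<-ctx.Done()
	return ctx.Err()
}

func main() {
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()
	fmt.Println(run(ctx))
}
```
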
diff --git a/pkg/cmds/server/start.go b/pkg/cmds/server/start.go
index 5e34dbc2e..1c75d08e1 100644
--- a/pkg/cmds/server/start.go
+++ b/pkg/cmds/server/start.go
@@ -17,6 +17,7 @@ limitations under the License.
package server
import (
+ "context"
"fmt"
"io"
"net"
@@ -30,10 +31,9 @@ import (
admissionv1beta1 "k8s.io/api/admission/v1beta1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
- "k8s.io/apiserver/pkg/features"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
- "k8s.io/apiserver/pkg/util/feature"
+ basecompatibility "k8s.io/component-base/compatibility"
"kmodules.xyz/client-go/tools/clientcmd"
)
@@ -48,7 +48,6 @@ type StashOptions struct {
}
func NewStashOptions(out, errOut io.Writer) *StashOptions {
- _ = feature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=false", features.APIPriorityAndFairness))
o := &StashOptions{
// TODO we will nil out the etcd storage options. This requires a later level of k8s.io/apiserver
RecommendedOptions: genericoptions.NewRecommendedOptions(
@@ -109,6 +108,8 @@ func (o StashOptions) Config() (*server.StashConfig, error) {
"/apis/admission.stash.appscode.com/v1beta1/backupconfigurationvalidators",
}
+ serverConfig.EffectiveVersion = basecompatibility.NewEffectiveVersionFromString("v1.0.0", "", "")
+
serverConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(v1alpha1.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(server.Scheme))
serverConfig.OpenAPIConfig.Info.Title = "stash-webhook-server"
serverConfig.OpenAPIConfig.Info.Version = v1alpha1.SchemeGroupVersion.Version
@@ -131,7 +132,7 @@ func (o StashOptions) Config() (*server.StashConfig, error) {
return config, nil
}
-func (o StashOptions) Run(stopCh <-chan struct{}) error {
+func (o StashOptions) Run(ctx context.Context) error {
config, err := o.Config()
if err != nil {
return err
@@ -144,7 +145,7 @@ func (o StashOptions) Run(stopCh <-chan struct{}) error {
// Start periodic license verification
//nolint:errcheck
- go licenseEnforcer.VerifyLicensePeriodically(config.ExtraConfig.ClientConfig, o.ExtraOptions.LicenseFile, stopCh)
+ go licenseEnforcer.VerifyLicensePeriodically(config.ExtraConfig.ClientConfig, o.ExtraOptions.LicenseFile, ctx.Done())
- return s.Run(stopCh)
+ return s.Run(ctx)
}
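
Two changes land in start.go: the `APIPriorityAndFairness` feature-gate override disappears (presumably because the gate is GA and locked on at this k8s.io/apiserver level, so setting it to false is no longer possible), and the generic server config must now declare an effective version via `k8s.io/component-base/compatibility`. A sketch of just that wiring; the constructor signature is taken from the hunk above, while the accessor names on the returned `EffectiveVersion` are assumed:

```go
package main

import (
	"fmt"

	basecompatibility "k8s.io/component-base/compatibility"
)

func main() {
	// Arguments are (binaryVersion, emulationVersion, minCompatibilityVersion);
	// empty strings let component-base derive defaults from the binary version.
	ev := basecompatibility.NewEffectiveVersionFromString("v1.0.0", "", "")
	// BinaryVersion/EmulationVersion are assumed accessor names on the
	// EffectiveVersion interface.
	fmt.Println(ev.BinaryVersion(), ev.EmulationVersion())
}
```
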
diff --git a/pkg/controller/backup_configuration.go b/pkg/controller/backup_configuration.go
index 8eea3fada..1e9c279fa 100644
--- a/pkg/controller/backup_configuration.go
+++ b/pkg/controller/backup_configuration.go
@@ -112,7 +112,7 @@ func (c *StashController) validateBackupConfiguration(bc *api_v1beta1.BackupConf
func (c *StashController) initBackupConfigurationWatcher() {
c.bcInformer = c.stashInformerFactory.Stash().V1beta1().BackupConfigurations().Informer()
- c.bcQueue = queue.New(api_v1beta1.ResourceKindBackupConfiguration, c.MaxNumRequeues, c.NumThreads, c.runBackupConfigurationProcessor)
+ c.bcQueue = queue.New[any](api_v1beta1.ResourceKindBackupConfiguration, c.MaxNumRequeues, c.NumThreads, c.runBackupConfigurationProcessor)
if c.auditor != nil {
c.auditor.ForGVK(c.bcInformer, api_v1beta1.SchemeGroupVersion.WithKind(api_v1beta1.ResourceKindBackupConfiguration))
}
@@ -130,7 +130,8 @@ func (c *StashController) initBackupConfigurationWatcher() {
// syncToStdout is the business logic of the controller. In this controller it simply prints
// information about the deployment to stdout. In case an error happened, it has to simply return the error.
// The retry logic should not be part of the business logic.
-func (c *StashController) runBackupConfigurationProcessor(key string) error {
+func (c *StashController) runBackupConfigurationProcessor(v any) error {
+ key := v.(string)
obj, exists, err := c.bcInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/backup_session.go b/pkg/controller/backup_session.go
index 4a1176ec3..47d5adb81 100644
--- a/pkg/controller/backup_session.go
+++ b/pkg/controller/backup_session.go
@@ -91,7 +91,7 @@ func (c *StashController) NewBackupSessionWebhook() hooks.AdmissionHook {
func (c *StashController) initBackupSessionWatcher() {
c.backupSessionInformer = c.stashInformerFactory.Stash().V1beta1().BackupSessions().Informer()
- c.backupSessionQueue = queue.New(api_v1beta1.ResourceKindBackupSession, c.MaxNumRequeues, c.NumThreads, c.processBackupSessionEvent)
+ c.backupSessionQueue = queue.New[any](api_v1beta1.ResourceKindBackupSession, c.MaxNumRequeues, c.NumThreads, c.processBackupSessionEvent)
if c.auditor != nil {
c.auditor.ForGVK(c.backupSessionInformer, api_v1beta1.SchemeGroupVersion.WithKind(api_v1beta1.ResourceKindBackupSession))
}
@@ -99,7 +99,8 @@ func (c *StashController) initBackupSessionWatcher() {
c.backupSessionLister = c.stashInformerFactory.Stash().V1beta1().BackupSessions().Lister()
}
-func (c *StashController) processBackupSessionEvent(key string) error {
+func (c *StashController) processBackupSessionEvent(v any) error {
+ key := v.(string)
obj, exists, err := c.backupSessionInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 4de3cf5a9..dd2552ca1 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -63,47 +63,47 @@ type StashController struct {
stashInformerFactory stashinformers.SharedInformerFactory
// Repository
- repoQueue *queue.Worker
+ repoQueue *queue.Worker[any]
repoInformer cache.SharedIndexInformer
repoLister stash_listers.RepositoryLister
// Deployment
- dpQueue *queue.Worker
+ dpQueue *queue.Worker[any]
dpInformer cache.SharedIndexInformer
dpLister apps_listers.DeploymentLister
// DaemonSet
- dsQueue *queue.Worker
+ dsQueue *queue.Worker[any]
dsInformer cache.SharedIndexInformer
dsLister apps_listers.DaemonSetLister
// StatefulSet
- ssQueue *queue.Worker
+ ssQueue *queue.Worker[any]
ssInformer cache.SharedIndexInformer
ssLister apps_listers.StatefulSetLister
// Job
- jobQueue *queue.Worker
+ jobQueue *queue.Worker[any]
jobInformer cache.SharedIndexInformer
jobLister batch_listers.JobLister
// BackupConfiguration
- bcQueue *queue.Worker
+ bcQueue *queue.Worker[any]
bcInformer cache.SharedIndexInformer
bcLister stash_listers_v1beta1.BackupConfigurationLister
// BackupSession
- backupSessionQueue *queue.Worker
+ backupSessionQueue *queue.Worker[any]
backupSessionInformer cache.SharedIndexInformer
backupSessionLister stash_listers_v1beta1.BackupSessionLister
// RestoreSession
- restoreSessionQueue *queue.Worker
+ restoreSessionQueue *queue.Worker[any]
restoreSessionInformer cache.SharedIndexInformer
restoreSessionLister stash_listers_v1beta1.RestoreSessionLister
// Openshift DeploymentConfiguration
- dcQueue *queue.Worker
+ dcQueue *queue.Worker[any]
dcInformer cache.SharedIndexInformer
dcLister oc_listers.DeploymentConfigLister
}
diff --git a/pkg/controller/daemonsets.go b/pkg/controller/daemonsets.go
index a6daf998a..7452966f1 100644
--- a/pkg/controller/daemonsets.go
+++ b/pkg/controller/daemonsets.go
@@ -81,7 +81,7 @@ func (c *StashController) NewDaemonSetWebhook() hooks.AdmissionHook {
func (c *StashController) initDaemonSetWatcher() {
c.dsInformer = c.kubeInformerFactory.Apps().V1().DaemonSets().Informer()
- c.dsQueue = queue.New("DaemonSet", c.MaxNumRequeues, c.NumThreads, c.processDaemonSetEvent)
+ c.dsQueue = queue.New[any]("DaemonSet", c.MaxNumRequeues, c.NumThreads, c.processDaemonSetEvent)
_, _ = c.dsInformer.AddEventHandler(queue.DefaultEventHandler(c.dsQueue.GetQueue(), core.NamespaceAll))
c.dsLister = c.kubeInformerFactory.Apps().V1().DaemonSets().Lister()
}
@@ -89,7 +89,8 @@ func (c *StashController) initDaemonSetWatcher() {
// syncToStdout is the business logic of the controller. In this controller it simply prints
// information about the daemonset to stdout. In case an error happened, it has to simply return the error.
// The retry logic should not be part of the business logic.
-func (c *StashController) processDaemonSetEvent(key string) error {
+func (c *StashController) processDaemonSetEvent(v any) error {
+ key := v.(string)
obj, exists, err := c.dsInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/deployment.go b/pkg/controller/deployment.go
index f834b85fc..08c4361a6 100644
--- a/pkg/controller/deployment.go
+++ b/pkg/controller/deployment.go
@@ -81,12 +81,13 @@ func (c *StashController) NewDeploymentWebhook() hooks.AdmissionHook {
func (c *StashController) initDeploymentWatcher() {
c.dpInformer = c.kubeInformerFactory.Apps().V1().Deployments().Informer()
- c.dpQueue = queue.New("Deployment", c.MaxNumRequeues, c.NumThreads, c.processDeploymentEvent)
+ c.dpQueue = queue.New[any]("Deployment", c.MaxNumRequeues, c.NumThreads, c.processDeploymentEvent)
_, _ = c.dpInformer.AddEventHandler(queue.DefaultEventHandler(c.dpQueue.GetQueue(), core.NamespaceAll))
c.dpLister = c.kubeInformerFactory.Apps().V1().Deployments().Lister()
}
-func (c *StashController) processDeploymentEvent(key string) error {
+func (c *StashController) processDeploymentEvent(v any) error {
+ key := v.(string)
obj, exists, err := c.dpInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/deploymentconfiguration.go b/pkg/controller/deploymentconfiguration.go
index ace568ede..2fde99615 100644
--- a/pkg/controller/deploymentconfiguration.go
+++ b/pkg/controller/deploymentconfiguration.go
@@ -86,7 +86,7 @@ func (c *StashController) initDeploymentConfigWatcher() {
return
}
c.dcInformer = c.ocInformerFactory.Apps().V1().DeploymentConfigs().Informer()
- c.dcQueue = queue.New(apis.KindDeploymentConfig, c.MaxNumRequeues, c.NumThreads, c.processDeploymentConfigEvent)
+ c.dcQueue = queue.New[any](apis.KindDeploymentConfig, c.MaxNumRequeues, c.NumThreads, c.processDeploymentConfigEvent)
_, _ = c.dcInformer.AddEventHandler(queue.DefaultEventHandler(c.dcQueue.GetQueue(), core.NamespaceAll))
c.dcLister = c.ocInformerFactory.Apps().V1().DeploymentConfigs().Lister()
}
@@ -94,7 +94,8 @@ func (c *StashController) initDeploymentConfigWatcher() {
// syncToStdout is the business logic of the controller. In this controller it simply prints
// information about the deployment to stdout. In case an error happened, it has to simply return the error.
// The retry logic should not be part of the business logic.
-func (c *StashController) processDeploymentConfigEvent(key string) error {
+func (c *StashController) processDeploymentConfigEvent(v any) error {
+ key := v.(string)
obj, exists, err := c.dcInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/jobs.go b/pkg/controller/jobs.go
index 4fcf8feca..1c19c25f4 100644
--- a/pkg/controller/jobs.go
+++ b/pkg/controller/jobs.go
@@ -49,12 +49,13 @@ func (c *StashController) initJobWatcher() {
},
)
})
- c.jobQueue = queue.New("Job", c.MaxNumRequeues, c.NumThreads, c.runJobInjector)
+ c.jobQueue = queue.New[any]("Job", c.MaxNumRequeues, c.NumThreads, c.runJobInjector)
_, _ = c.jobInformer.AddEventHandler(queue.DefaultEventHandler(c.jobQueue.GetQueue(), core.NamespaceAll))
c.jobLister = c.kubeInformerFactory.Batch().V1().Jobs().Lister()
}
-func (c *StashController) runJobInjector(key string) error {
+func (c *StashController) runJobInjector(v any) error {
+ key := v.(string)
obj, exists, err := c.jobInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/repository.go b/pkg/controller/repository.go
index 0614d7034..f90bfe38f 100644
--- a/pkg/controller/repository.go
+++ b/pkg/controller/repository.go
@@ -37,7 +37,7 @@ import (
"k8s.io/klog/v2"
core_util "kmodules.xyz/client-go/core/v1"
"kmodules.xyz/client-go/tools/queue"
- "kmodules.xyz/objectstore-api/osm"
+ "kmodules.xyz/objectstore-api/pkg/osm"
"kmodules.xyz/webhook-runtime/admission"
hooks "kmodules.xyz/webhook-runtime/admission/v1beta1"
webhook "kmodules.xyz/webhook-runtime/admission/v1beta1/generic"
@@ -73,7 +73,7 @@ func (c *StashController) NewRepositoryWebhook() hooks.AdmissionHook {
func (c *StashController) initRepositoryWatcher() {
c.repoInformer = c.stashInformerFactory.Stash().V1alpha1().Repositories().Informer()
- c.repoQueue = queue.New(api_v1alpha1.ResourceKindRepository, c.MaxNumRequeues, c.NumThreads, c.runRepositoryReconciler)
+ c.repoQueue = queue.New[any](api_v1alpha1.ResourceKindRepository, c.MaxNumRequeues, c.NumThreads, c.runRepositoryReconciler)
if c.auditor != nil {
c.auditor.ForGVK(c.repoInformer, api_v1alpha1.SchemeGroupVersion.WithKind(api_v1alpha1.ResourceKindRepository))
}
@@ -81,7 +81,8 @@ func (c *StashController) initRepositoryWatcher() {
c.repoLister = c.stashInformerFactory.Stash().V1alpha1().Repositories().Lister()
}
-func (c *StashController) runRepositoryReconciler(key string) error {
+func (c *StashController) runRepositoryReconciler(v any) error {
+ key := v.(string)
obj, exist, err := c.repoInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/restore_session.go b/pkg/controller/restore_session.go
index 520f416e2..9167b15d7 100644
--- a/pkg/controller/restore_session.go
+++ b/pkg/controller/restore_session.go
@@ -129,7 +129,7 @@ func (c *StashController) NewRestoreSessionMutator() hooks.AdmissionHook {
// process only add events
func (c *StashController) initRestoreSessionWatcher() {
c.restoreSessionInformer = c.stashInformerFactory.Stash().V1beta1().RestoreSessions().Informer()
- c.restoreSessionQueue = queue.New(api_v1beta1.ResourceKindRestoreSession, c.MaxNumRequeues, c.NumThreads, c.processRestoreSessionEvent)
+ c.restoreSessionQueue = queue.New[any](api_v1beta1.ResourceKindRestoreSession, c.MaxNumRequeues, c.NumThreads, c.processRestoreSessionEvent)
if c.auditor != nil {
c.auditor.ForGVK(c.restoreSessionInformer, api_v1beta1.SchemeGroupVersion.WithKind(api_v1beta1.ResourceKindRestoreSession))
}
@@ -137,7 +137,8 @@ func (c *StashController) initRestoreSessionWatcher() {
c.restoreSessionLister = c.stashInformerFactory.Stash().V1beta1().RestoreSessions().Lister()
}
-func (c *StashController) processRestoreSessionEvent(key string) error {
+func (c *StashController) processRestoreSessionEvent(v any) error {
+ key := v.(string)
obj, exists, err := c.restoreSessionInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/controller/statefulsets.go b/pkg/controller/statefulsets.go
index 155128693..4f1c1a03e 100644
--- a/pkg/controller/statefulsets.go
+++ b/pkg/controller/statefulsets.go
@@ -81,7 +81,7 @@ func (c *StashController) NewStatefulSetWebhook() hooks.AdmissionHook {
func (c *StashController) initStatefulSetWatcher() {
c.ssInformer = c.kubeInformerFactory.Apps().V1().StatefulSets().Informer()
- c.ssQueue = queue.New("StatefulSet", c.MaxNumRequeues, c.NumThreads, c.processStatefulSetEvent)
+ c.ssQueue = queue.New[any]("StatefulSet", c.MaxNumRequeues, c.NumThreads, c.processStatefulSetEvent)
_, _ = c.ssInformer.AddEventHandler(queue.DefaultEventHandler(c.ssQueue.GetQueue(), core.NamespaceAll))
c.ssLister = c.kubeInformerFactory.Apps().V1().StatefulSets().Lister()
}
@@ -89,7 +89,8 @@ func (c *StashController) initStatefulSetWatcher() {
// syncToStdout is the business logic of the controller. In this controller it simply prints
// information about the deployment to stdout. In case an error happened, it has to simply return the error.
// The retry logic should not be part of the business logic.
-func (c *StashController) processStatefulSetEvent(key string) error {
+func (c *StashController) processStatefulSetEvent(v any) error {
+ key := v.(string)
obj, exists, err := c.ssInformer.GetIndexer().GetByKey(key)
if err != nil {
klog.ErrorS(err, "Failed to fetch object from indexer",
diff --git a/pkg/rbac/volume_snapshot.go b/pkg/rbac/volume_snapshot.go
index 95c3a1b76..d9666adab 100644
--- a/pkg/rbac/volume_snapshot.go
+++ b/pkg/rbac/volume_snapshot.go
@@ -24,7 +24,7 @@ import (
api_v1alpha1 "stash.appscode.dev/apimachinery/apis/stash/v1alpha1"
api_v1beta1 "stash.appscode.dev/apimachinery/apis/stash/v1beta1"
- vsapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ vsapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
diff --git a/pkg/server/server.go b/pkg/server/server.go
index 93fcb6317..06ffe2d89 100644
--- a/pkg/server/server.go
+++ b/pkg/server/server.go
@@ -37,7 +37,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/kubernetes"
@@ -88,13 +87,13 @@ type StashServer struct {
Controller *controller.StashController
}
-func (op *StashServer) Run(stopCh <-chan struct{}) error {
+func (op *StashServer) Run(ctx context.Context) error {
if err := op.Controller.MigrateObservedGeneration(); err != nil {
return fmt.Errorf("failed to migrate observedGeneration to int64 for existing objects. Reason: %v", err)
}
// sync cache
- go op.Controller.Run(stopCh)
- return op.GenericAPIServer.PrepareRun().Run(stopCh)
+ go op.Controller.Run(ctx.Done())
+ return op.GenericAPIServer.PrepareRun().RunWithContext(ctx)
}
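The server now runs off a `context.Context` instead of a bare stop channel, and `ctx.Done()` is the bridge back to components that still take `<-chan struct{}`. A hedged sketch of that wiring using only the standard library; `signal.NotifyContext` is an assumption about how a caller might build the context, and `legacyRun` is a hypothetical stand-in for stop-channel code like `Controller.Run`.

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
	"time"
)

// legacyRun stands in for code that still accepts a stop channel.
func legacyRun(stopCh <-chan struct{}) {
	<-stopCh
	fmt.Println("controller stopped")
}

func main() {
	// Derive a context that is cancelled on SIGINT/SIGTERM.
	ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	// ctx.Done() yields a <-chan struct{} closed on cancellation: exactly
	// the shape the legacy stop-channel APIs expect.
	go legacyRun(ctx.Done())

	// Stand-in for RunWithContext(ctx): block until the context ends.
	// Cancel after a short delay instead of waiting for a real signal.
	time.AfterFunc(100*time.Millisecond, cancel)
	<-ctx.Done()
	time.Sleep(10 * time.Millisecond) // let legacyRun log before exit
}
```

The same `ctx.Done()` bridge appears again in the post-start hooks below, where `PostStartHookContext.StopCh` gave way to `context.Done()`.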
type completedConfig struct {
@@ -113,12 +112,6 @@ func (c *StashConfig) Complete() CompletedConfig {
c.GenericConfig.Complete(),
c.ExtraConfig,
}
-
- completedCfg.GenericConfig.Version = &version.Info{
- Major: "1",
- Minor: "1",
- }
-
return CompletedConfig{&completedCfg}
}
@@ -206,7 +199,7 @@ func (c completedConfig) New() (*StashServer, error) {
}
s.GenericAPIServer.AddPostStartHookOrDie(postStartName,
func(context genericapiserver.PostStartHookContext) error {
- return admissionHook.Initialize(c.ExtraConfig.ClientConfig, context.StopCh)
+ return admissionHook.Initialize(c.ExtraConfig.ClientConfig, context.Done())
},
)
}
@@ -237,7 +230,7 @@ func (c completedConfig) New() (*StashServer, error) {
},
},
},
- }, ctx.StopCh)
+ }, ctx.Done())
if err := xray.IsActive(context.TODO()); err != nil {
w, _, e2 := dynamic_util.DetectWorkload(
context.TODO(),
diff --git a/pkg/volumesnapshot/volumesnapshot_cleanup_policy.go b/pkg/volumesnapshot/volumesnapshot_cleanup_policy.go
index 0fc001bd1..8026b30e5 100644
--- a/pkg/volumesnapshot/volumesnapshot_cleanup_policy.go
+++ b/pkg/volumesnapshot/volumesnapshot_cleanup_policy.go
@@ -24,8 +24,8 @@ import (
"stash.appscode.dev/apimachinery/apis/stash/v1alpha1"
"stash.appscode.dev/apimachinery/apis/stash/v1beta1"
- vsapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
- vscs "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned"
+ vsapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+ vscs "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned"
kerr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
diff --git a/pkg/volumesnapshot/volumesnapshot_cleanup_policy_test.go b/pkg/volumesnapshot/volumesnapshot_cleanup_policy_test.go
index b5337d956..04e3ea722 100644
--- a/pkg/volumesnapshot/volumesnapshot_cleanup_policy_test.go
+++ b/pkg/volumesnapshot/volumesnapshot_cleanup_policy_test.go
@@ -25,8 +25,8 @@ import (
"stash.appscode.dev/apimachinery/apis/stash/v1alpha1"
"stash.appscode.dev/apimachinery/apis/stash/v1beta1"
- vsapi "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
- vsfake "github.com/kubernetes-csi/external-snapshotter/client/v7/clientset/versioned/fake"
+ vsapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+ vsfake "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/fake"
type_util "gomodules.xyz/pointer"
"gomodules.xyz/x/strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
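The external-snapshotter client bump is mechanical because the generated clientset surface is stable across major versions: for this code, v7 → v8 is an import-path change. A hedged sketch exercising the v8 fake the way the updated test does; the method names assume the standard client-gen layout.

```go
package main

import (
	"context"
	"fmt"

	vsapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
	vsfake "github.com/kubernetes-csi/external-snapshotter/client/v8/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Fake clientset backed by an in-memory object tracker.
	client := vsfake.NewSimpleClientset()

	vs := &vsapi.VolumeSnapshot{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-snapshot", Namespace: "demo"},
	}
	if _, err := client.SnapshotV1().VolumeSnapshots("demo").
		Create(context.TODO(), vs, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	list, err := client.SnapshotV1().VolumeSnapshots("demo").
		List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("snapshots:", len(list.Items))
}
```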
diff --git a/test/e2e/framework/repository.go b/test/e2e/framework/repository.go
index 6d923553e..612db751d 100644
--- a/test/e2e/framework/repository.go
+++ b/test/e2e/framework/repository.go
@@ -35,7 +35,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"kmodules.xyz/client-go/tools/portforward"
store "kmodules.xyz/objectstore-api/api/v1"
- "kmodules.xyz/objectstore-api/osm"
+ "kmodules.xyz/objectstore-api/pkg/osm"
)
type KindMetaReplicas struct {
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..13c50892b
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.3.2
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 000000000..0a525bc17
--- /dev/null
+++ b/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+ linked to the FR in the CEL-Spec repo and opened for comments to members of
+ the cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+  (once every three weeks), and the council's decision will be included in
+  the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/cel.dev/expr/LICENSE
similarity index 100%
rename from vendor/github.com/google/shlex/COPYING
rename to vendor/cel.dev/expr/LICENSE
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 000000000..1ed2eb8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthais Blume | Google | cel-spec |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 000000000..85ac9ff61
--- /dev/null
+++ b/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,74 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.39.1",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20241220-5e258e33.bcr.1",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "googleapis-cc",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-java",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-go",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "27.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.17",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.53.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "7.0.2",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.22.0")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md
new file mode 100644
index 000000000..42d67f87c
--- /dev/null
+++ b/vendor/cel.dev/expr/README.md
@@ -0,0 +1,71 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+ tooling to reason about the system as a whole
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+ * CEL evaluates in linear time, is mutation free, and not Turing-complete.
+ This limitation is a feature of the language design, which allows the
+ implementation to evaluate orders of magnitude faster than equivalently
+ sandboxed JavaScript.
+2. Make it extensible.
+ * CEL is designed to be embedded in applications, and allows for
+ extensibility via its context which allows for functions and data to be
+ provided by the software that embeds it.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+   * The library itself and accompanying tooling should be easy to adopt by
+ teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. It is
+ of similar syntax to expressions in C/C++/Java/JavaScript
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+ Most use-cases will use `attribute_context.proto`
+* An evaluator library that takes the binary format in the context and
+ produces a result, usually a Boolean.
+
+For use cases which require persistence or cross-process communication, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team maintains canonical protocol buffers for ASTs and
+will keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
+
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 000000000..b6dc9ed67
--- /dev/null
+++ b/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc, yet. See comment at the end.
+
+# Generated Google APIs protos for Golang
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/vendor/cel.dev/expr/WORKSPACE.bzlmod b/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 000000000..bb225c8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
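Note on the generated API above: every Get* accessor in checked.pb.go is nil-safe, returning a zero value when the receiver or the relevant oneof arm is unset, so callers can chase nested fields without intermediate nil checks. A minimal usage sketch follows, assuming the vendored module path cel.dev/expr (taken from the go_package option in the descriptor); the import alias celexpr and the sample declaration values are hypothetical, not from this diff:

    package main

    import (
    	"fmt"

    	celexpr "cel.dev/expr"
    )

    func main() {
    	// Build a Decl carrying a FunctionDecl with one overload; the field and
    	// type names mirror the generated structs in checked.pb.go above.
    	d := &celexpr.Decl{
    		Name: "size",
    		DeclKind: &celexpr.Decl_Function{
    			Function: &celexpr.Decl_FunctionDecl{
    				Overloads: []*celexpr.Decl_FunctionDecl_Overload{{
    					OverloadId: "size_string",
    					Params: []*celexpr.Type{{
    						TypeKind: &celexpr.Type_Primitive{Primitive: celexpr.Type_STRING},
    					}},
    					ResultType: &celexpr.Type{
    						TypeKind: &celexpr.Type_Primitive{Primitive: celexpr.Type_INT64},
    					},
    				}},
    			},
    		},
    	}
    	// GetFunction, GetOverloads, and GetResultType all tolerate nil
    	// receivers, so this chain needs no guards.
    	for _, o := range d.GetFunction().GetOverloads() {
    		fmt.Println(o.GetOverloadId(), o.GetResultType().GetPrimitive())
    	}
    }

This prints "size_string INT64"; the oneof wrapper structs (Decl_Function, Type_Primitive) are how protoc-gen-go represents the decl_kind and type_kind oneofs from the descriptor.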
diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 000000000..e3e533a04
--- /dev/null
+++ b/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.3.2'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 000000000..a7aae0900
--- /dev/null
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,487 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.3
+// protoc v5.27.1
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+type ExprValue struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExprValue) GetKind() isExprValue_Kind {
+ if x != nil {
+ return x.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Value); ok {
+ return x.Value
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Error); ok {
+ return x.Error
+ }
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x != nil {
+ if x, ok := x.Kind.(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Errors []*Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type Status struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Status) GetCode() int32 {
+ if x != nil {
+ return x.Code
+ }
+ return 0
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+ if x != nil {
+ return x.Details
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f,
+ 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b,
+ 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72,
+ 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74,
+ 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
+ 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48,
+ 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69,
+ 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28,
+ 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65,
+ 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8,
+ 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_cel_expr_eval_proto_goTypes = []any{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*Status)(nil), // 3: cel.expr.Status
+ (*UnknownSet)(nil), // 4: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 5: cel.expr.EvalState.Result
+ (*Value)(nil), // 6: cel.expr.Value
+ (*anypb.Any)(nil), // 7: google.protobuf.Any
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 5, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 6, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 4, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 3, // 5: cel.expr.ErrorSet.errors:type_name -> cel.expr.Status
+ 7, // 6: cel.expr.Status.details:type_name -> google.protobuf.Any
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []any{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
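The ExprValue message above models CEL's three possible evaluation outcomes as a oneof: a concrete Value, an ErrorSet, or an UnknownSet. A minimal sketch of dispatching on whichever arm is populated, again assuming the cel.dev/expr import path; the alias celexpr and the sample status code are illustrative only:

    package main

    import (
    	"fmt"

    	celexpr "cel.dev/expr"
    )

    // describe reports which arm of the ExprValue "kind" oneof is set. The
    // generated Get* accessors return nil for the arms that are not populated,
    // and are safe to call even when v itself is nil.
    func describe(v *celexpr.ExprValue) string {
    	switch {
    	case v.GetValue() != nil:
    		return "value"
    	case v.GetError() != nil:
    		return fmt.Sprintf("error set: %d status(es)", len(v.GetError().GetErrors()))
    	case v.GetUnknown() != nil:
    		return fmt.Sprintf("unknown exprs: %v", v.GetUnknown().GetExprs())
    	default:
    		return "unset"
    	}
    }

    func main() {
    	ev := &celexpr.ExprValue{
    		Kind: &celexpr.ExprValue_Error{
    			Error: &celexpr.ErrorSet{
    				Errors: []*celexpr.Status{{Code: 3, Message: "division by zero"}},
    			},
    		},
    	}
    	fmt.Println(describe(ev)) // error set: 1 status(es)
    }

Note that Status here is this package's own message (code, message, details), not the gRPC status type, as the generated struct above shows.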
diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 000000000..79fd5443b
--- /dev/null
+++ b/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 000000000..fdcbb3ce2
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in "${files[@]}";
+do
+  dst=$(echo "$src" | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+  echo "copying $dst"
+  cp "$src" "$dst"
+done
diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 000000000..9a13479e4
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 000000000..48a952872
--- /dev/null
+++ b/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 000000000..e5e29228c
--- /dev/null
+++ b/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
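
The generated file above exposes each `oneof` case through a small wrapper type (`Value_StringValue`, `Value_MapValue`, ...) plus a nil-safe getter that returns the zero value when a different case is set. A minimal sketch of how calling code might build and inspect a `cel.expr` Value with this API (the map key `retries` and its value are illustrative data, not from the source):

```go
package main

import (
	"fmt"

	expr "cel.dev/expr"
)

func main() {
	// Build the map value {"retries": 3} via the oneof wrapper types.
	v := &expr.Value{Kind: &expr.Value_MapValue{MapValue: &expr.MapValue{
		Entries: []*expr.MapValue_Entry{{
			Key:   &expr.Value{Kind: &expr.Value_StringValue{StringValue: "retries"}},
			Value: &expr.Value{Kind: &expr.Value_Int64Value{Int64Value: 3}},
		}},
	}}}

	// Getters are safe to call regardless of which case is set; a
	// mismatched case simply yields the zero value.
	fmt.Println(v.GetMapValue().GetEntries()[0].GetValue().GetInt64Value()) // 3
	fmt.Println(v.GetStringValue()) // "" because Kind is not Value_StringValue
}
```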
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
new file mode 100644
index 000000000..500c34cf4
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -0,0 +1,396 @@
+# Changelog
+
+## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19)
+
+
+### Features
+
+* **auth:** Add hard-bound token request to compute token provider. ([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699))
+
+## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24)
+
+
+### Documentation
+
+* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941))
+
+## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08)
+
+
+### Features
+
+* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379)
+* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13)
+
+
+### Features
+
+* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d))
+* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f))
+
+
+### Bug Fixes
+
+* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90))
+
+## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10)
+
+
+### Bug Fixes
+
+* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1))
+
+## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04)
+
+
+### Features
+
+* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005)
+
+
+### Bug Fixes
+
+* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188)
+
+## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21)
+
+
+### Features
+
+* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344))
+
+## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12)
+
+
+### Bug Fixes
+
+* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556)
+
+## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06)
+
+
+### Bug Fixes
+
+* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2))
+* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6))
+
+## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30)
+
+
+### Features
+
+* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b))
+
+## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22)
+
+
+### Bug Fixes
+
+* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844)
+* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114))
+
+## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
+
+
+### Bug Fixes
+
+* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
+* **auth:** Try to talk to plaintext S2A if credentials cannot be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
+
+## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
+
+## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
+
+
+### Bug Fixes
+
+* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
+
+## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2))
+* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1))
+* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350))
+
+## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11)
+
+
+### Bug Fixes
+
+* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6))
+
+## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
+
+
+### Bug Fixes
+
+* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804)
+
+## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30)
+
+
+### Bug Fixes
+
+* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742)
+* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795)
+
+
+### Documentation
+
+* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437))
+
+## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22)
+
+
+### Bug Fixes
+
+* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948))
+
+## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16)
+
+
+### Features
+
+* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45))
+
+## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13)
+
+
+### Bug Fixes
+
+* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638)
+
+## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07)
+
+
+### Features
+
+* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b))
+
+## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8))
+* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22)
+
+
+### Bug Fixes
+
+* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544)
+
+## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
+## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09)
+
+
+### Features
+
+* **auth:** Add workload X509 cert provider as a default cert provider ([#10479](https://github.com/googleapis/google-cloud-go/issues/10479)) ([c51ee6c](https://github.com/googleapis/google-cloud-go/commit/c51ee6cf65ce05b4d501083e49d468c75ac1ea63))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **auth:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **auth:** Check len of slices, not non-nil ([#10483](https://github.com/googleapis/google-cloud-go/issues/10483)) ([0a966a1](https://github.com/googleapis/google-cloud-go/commit/0a966a183e5f0e811977216d736d875b7233e942))
+
+## [0.6.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.0...auth/v0.6.1) (2024-07-01)
+
+
+### Bug Fixes
+
+* **auth:** Support gRPC API keys ([#10460](https://github.com/googleapis/google-cloud-go/issues/10460)) ([daa6646](https://github.com/googleapis/google-cloud-go/commit/daa6646d2af5d7fb5b30489f4934c7db89868c7c))
+* **auth:** Update http and grpc transports to support token exchange over mTLS ([#10397](https://github.com/googleapis/google-cloud-go/issues/10397)) ([c6dfdcf](https://github.com/googleapis/google-cloud-go/commit/c6dfdcf893c3f971eba15026c12db0a960ae81f2))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.2...auth/v0.6.0) (2024-06-25)
+
+
+### Features
+
+* **auth:** Add non-blocking token refresh for compute MDS ([#10263](https://github.com/googleapis/google-cloud-go/issues/10263)) ([9ac350d](https://github.com/googleapis/google-cloud-go/commit/9ac350da11a49b8e2174d3fc5b1a5070fec78b4e))
+
+
+### Bug Fixes
+
+* **auth:** Return error if envvar detected file returns an error ([#10431](https://github.com/googleapis/google-cloud-go/issues/10431)) ([e52b9a7](https://github.com/googleapis/google-cloud-go/commit/e52b9a7c45468827f5d220ab00965191faeb9d05))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.1...auth/v0.5.2) (2024-06-24)
+
+
+### Bug Fixes
+
+* **auth:** Fetch initial token when CachedTokenProviderOptions.DisableAutoRefresh is true ([#10415](https://github.com/googleapis/google-cloud-go/issues/10415)) ([3266763](https://github.com/googleapis/google-cloud-go/commit/32667635ca2efad05cd8c087c004ca07d7406913)), refs [#10414](https://github.com/googleapis/google-cloud-go/issues/10414)
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31)
+
+
+### Bug Fixes
+
+* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28)
+
+
+### Features
+
+* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150))
+
+## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
+
+
+### Bug Fixes
+
+* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15))
+* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159)
+* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8))
+* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea))
+
+## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)
+
+
+### Bug Fixes
+
+* **auth:** Don't try to detect default creds if opts are configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07)
+
+
+### Features
+
+* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c))
+
+
+### Bug Fixes
+
+* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06))
+
+## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
+
+
+### Features
+
+* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814)
+
+
+### Bug Fixes
+
+* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809)
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
+
+
+### Bug Fixes
+
+* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
+* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
+
+## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
+
+
+### Bug Fixes
+
+* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
+
+## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
+
+### Breaking Changes
+
+In the commits mentioned below there were a few large breaking changes since the
+last release of the module.
+
+1. The `Credentials` type has been moved to the root of the module as it is
+ becoming the core abstraction for the whole module.
+2. Because of the above-mentioned change, many functions that previously
+   returned a `TokenProvider` now return `Credentials`. Similarly, these
+   functions have been renamed to be more specific.
+3. Most places that used to take an optional `TokenProvider` now accept
+   `Credentials`. You can make a `Credentials` from a `TokenProvider` using the
+   constructor found in the `auth` package; a short sketch follows this changelog.
+4. The `detect` package has been renamed to `credentials`. With this change some
+ function signatures were also updated for better readability.
+5. Derivative auth flows like `impersonate` and `downscope` have been moved to
+ be under the new `credentials` package.
+
+Although these changes are disruptive, we think they are in the best interest of
+the long-term health of the module. We do not expect any more large breaking changes
+like these in future revisions, even before 1.0.0. This version will be the
+first version of the auth library that our client libraries start to use and
+depend on.
+
+### Features
+
+* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6))
+* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d))
+* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670)
+* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501))
+* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac))
+* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d))
+* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
+* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd))
+* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824))
+* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9))
+* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10)
+
+
+### Bug Fixes
+
+* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136)
+* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
+* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
+
+## 0.1.0 (2023-10-18)
+
+
+### Features
+
+* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e))
+* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993))
+* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8))
+* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7))
+* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c))
+* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5))
+* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff))
+* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe))
+* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04))
+* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507))
+* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
+* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
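
To make the v0.2.0 migration note above concrete, here is a hedged sketch of wrapping an existing `TokenProvider` in a `Credentials` via the constructor in the `auth` package. The static provider and the token values are invented for illustration; real providers mint or fetch tokens on demand.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/auth"
)

// staticProvider is a made-up TokenProvider that always returns one token.
type staticProvider struct{ tok *auth.Token }

func (p staticProvider) Token(ctx context.Context) (*auth.Token, error) {
	return p.tok, nil
}

func main() {
	tp := staticProvider{tok: &auth.Token{
		Value:  "ya29.example", // placeholder, not a real token
		Type:   "Bearer",
		Expiry: time.Now().Add(time.Hour),
	}}
	// Wrap the TokenProvider so APIs that now expect Credentials accept it.
	creds := auth.NewCredentials(&auth.CredentialsOptions{TokenProvider: tp})
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Type, tok.Value)
}
```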
diff --git a/vendor/cloud.google.com/go/auth/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
new file mode 100644
index 000000000..6fe4f0763
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -0,0 +1,40 @@
+# Google Auth Library for Go
+
+[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth)
+
+## Install
+
+``` bash
+go get cloud.google.com/go/auth@latest
+```
+
+## Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly, please see examples in the
+  [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+  package.
+- To create an authenticated HTTP client, please see examples in the
+  [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+  package.
+- To create an authenticated gRPC connection, please see examples in the
+  [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+  package.
+- To create an ID token, please see examples in the
+  [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+  package.
+
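+A minimal sketch of obtaining Application Default Credentials and fetching a
+token (the scope below is an assumed example):
+
+``` go
+package main
+
+import (
+	"context"
+	"log"
+
+	"cloud.google.com/go/auth/credentials"
+)
+
+func main() {
+	// DetectDefault looks up Application Default Credentials as described
+	// in the package documentation.
+	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
+		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	tok, err := creds.Token(context.Background())
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("token type: %s", tok.Type)
+}
+```
+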
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
new file mode 100644
index 000000000..cd5e98868
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -0,0 +1,618 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
+package auth
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // Parameter keys for AuthCodeURL method to support PKCE.
+ codeChallengeKey = "code_challenge"
+ codeChallengeMethodKey = "code_challenge_method"
+
+ // Parameter key for Exchange method to support PKCE.
+ codeVerifierKey = "code_verifier"
+
+ // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
+ // so we give it 15 seconds to refresh its cache before attempting to refresh a token.
+ defaultExpiryDelta = 225 * time.Second
+
+ universeDomainDefault = "googleapis.com"
+)
+
+// tokenState represents different states for a [Token].
+type tokenState int
+
+const (
+ // fresh indicates that the [Token] is valid. It is not expired or close to
+ // expired, or the token has no expiry.
+ fresh tokenState = iota
+ // stale indicates that the [Token] is close to expired, and should be
+ // refreshed. The token can be used normally.
+ stale
+ // invalid indicates that the [Token] is expired or invalid. The token
+ // cannot be used for a normal operation.
+ invalid
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType}
+
+ // for testing
+ timeNow = time.Now
+)
+
+// TokenProvider specifies an interface for anything that can return a token.
+type TokenProvider interface {
+ // Token returns a Token or an error.
+ // The Token returned must be safe to use
+ // concurrently.
+ // The returned Token must not be modified.
+ // The context provided must be sent along to any requests that are made in
+ // the implementing code.
+ Token(context.Context) (*Token, error)
+}
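+
+// An illustrative sketch (not part of the upstream API) of a trivial static
+// implementation satisfying TokenProvider:
+//
+//	type staticTokenProvider struct{ tok *Token }
+//
+//	func (p staticTokenProvider) Token(_ context.Context) (*Token, error) {
+//		return p.tok, nil
+//	}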
+
+// Token holds the credential token used to authorize requests. All fields are
+// considered read-only.
+type Token struct {
+ // Value is the token used to authorize requests. It is usually an access
+ // token but may be other types of tokens such as ID tokens in some flows.
+ Value string
+ // Type is the type of token Value is. If uninitialized, it should be
+ // assumed to be a "Bearer" token.
+ Type string
+ // Expiry is the time the token is set to expire.
+ Expiry time.Time
+ // Metadata may include, but is not limited to, the body of the token
+ // response returned by the server.
+ Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values
+}
+
+// IsValid reports whether a [Token] is non-nil, has a [Token.Value], and has not
+// expired. A token is considered expired if [Token.Expiry] has passed or will
+// pass in the next 225 seconds.
+func (t *Token) IsValid() bool {
+ return t.isValidWithEarlyExpiry(defaultExpiryDelta)
+}
+
+// MetadataString is a convenience method for accessing string values in the
+// token's metadata. Returns an empty string if the metadata is nil or the value
+// for the given key cannot be cast to a string.
+func (t *Token) MetadataString(k string) string {
+ if t.Metadata == nil {
+ return ""
+ }
+ s, ok := t.Metadata[k].(string)
+ if !ok {
+ return ""
+ }
+ return s
+}
+
+func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
+ if t.isEmpty() {
+ return false
+ }
+ if t.Expiry.IsZero() {
+ return true
+ }
+ return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow())
+}
+
+func (t *Token) isEmpty() bool {
+ return t == nil || t.Value == ""
+}
+
+// Credentials holds Google credentials, including
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
+type Credentials struct {
+ json []byte
+ projectID CredentialsPropertyProvider
+ quotaProjectID CredentialsPropertyProvider
+ // universeDomain is the default service domain for a given Cloud universe.
+ universeDomain CredentialsPropertyProvider
+
+ TokenProvider
+}
+
+// JSON returns the bytes associated with the file used to source
+// credentials if one was used.
+func (c *Credentials) JSON() []byte {
+ return c.json
+}
+
+// ProjectID returns the associated project ID from the underlying file or
+// environment.
+func (c *Credentials) ProjectID(ctx context.Context) (string, error) {
+ if c.projectID == nil {
+ return internal.GetProjectID(c.json, ""), nil
+ }
+ v, err := c.projectID.GetProperty(ctx)
+ if err != nil {
+ return "", err
+ }
+ return internal.GetProjectID(c.json, v), nil
+}
+
+// QuotaProjectID returns the associated quota project ID from the underlying
+// file or environment.
+func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) {
+ if c.quotaProjectID == nil {
+ return internal.GetQuotaProject(c.json, ""), nil
+ }
+ v, err := c.quotaProjectID.GetProperty(ctx)
+ if err != nil {
+ return "", err
+ }
+ return internal.GetQuotaProject(c.json, v), nil
+}
+
+// UniverseDomain returns the default service domain for a given Cloud universe.
+// The default value is "googleapis.com".
+func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) {
+ if c.universeDomain == nil {
+ return universeDomainDefault, nil
+ }
+ v, err := c.universeDomain.GetProperty(ctx)
+ if err != nil {
+ return "", err
+ }
+ if v == "" {
+ return universeDomainDefault, nil
+ }
+ return v, err
+}
+
+// CredentialsPropertyProvider provides an implementation to fetch a property
+// value for [Credentials].
+type CredentialsPropertyProvider interface {
+ GetProperty(context.Context) (string, error)
+}
+
+// CredentialsPropertyFunc is a type adapter to allow the use of ordinary
+// functions as a [CredentialsPropertyProvider].
+type CredentialsPropertyFunc func(context.Context) (string, error)
+
+// GetProperty loads the property value for the given context.
+func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) {
+ return p(ctx)
+}
+
+// CredentialsOptions are used to configure [Credentials].
+type CredentialsOptions struct {
+ // TokenProvider is a means of sourcing a token for the credentials. Required.
+ TokenProvider TokenProvider
+ // JSON is the raw contents of the credentials file if sourced from a file.
+ JSON []byte
+ // ProjectIDProvider resolves the project ID associated with the
+ // credentials.
+ ProjectIDProvider CredentialsPropertyProvider
+ // QuotaProjectIDProvider resolves the quota project ID associated with the
+ // credentials.
+ QuotaProjectIDProvider CredentialsPropertyProvider
+ // UniverseDomainProvider resolves the universe domain with the credentials.
+ UniverseDomainProvider CredentialsPropertyProvider
+}
+
+// NewCredentials returns new [Credentials] from the provided options.
+func NewCredentials(opts *CredentialsOptions) *Credentials {
+ creds := &Credentials{
+ TokenProvider: opts.TokenProvider,
+ json: opts.JSON,
+ projectID: opts.ProjectIDProvider,
+ quotaProjectID: opts.QuotaProjectIDProvider,
+ universeDomain: opts.UniverseDomainProvider,
+ }
+
+ return creds
+}
+
+// CachedTokenProviderOptions provides options for configuring a cached
+// [TokenProvider].
+type CachedTokenProviderOptions struct {
+ // DisableAutoRefresh makes the TokenProvider always return the same token,
+ // even if it is expired. The default is false. Optional.
+ DisableAutoRefresh bool
+ // ExpireEarly configures the amount of time before a token expires that it
+ // should be refreshed. If unset, the default value is 3 minutes and 45
+ // seconds. Optional.
+ ExpireEarly time.Duration
+ // DisableAsyncRefresh configures a synchronous workflow that refreshes
+ // tokens in a blocking manner. The default is false. Optional.
+ DisableAsyncRefresh bool
+}
+
+func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
+ if ctpo == nil {
+ return true
+ }
+ return !ctpo.DisableAutoRefresh
+}
+
+func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
+ if ctpo == nil || ctpo.ExpireEarly == 0 {
+ return defaultExpiryDelta
+ }
+ return ctpo.ExpireEarly
+}
+
+func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool {
+ if ctpo == nil {
+ return false
+ }
+ return ctpo.DisableAsyncRefresh
+}
+
+// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
+// by the underlying provider. By default it will refresh tokens asynchronously
+// a few minutes before they expire.
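+//
+// An illustrative sketch of wrapping a provider (baseProvider here is an
+// assumed placeholder):
+//
+//	cached := NewCachedTokenProvider(baseProvider, &CachedTokenProviderOptions{
+//		ExpireEarly: 2 * time.Minute,
+//	})
+//	tok, err := cached.Token(ctx)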
+func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
+ if ctp, ok := tp.(*cachedTokenProvider); ok {
+ return ctp
+ }
+ return &cachedTokenProvider{
+ tp: tp,
+ autoRefresh: opts.autoRefresh(),
+ expireEarly: opts.expireEarly(),
+ blockingRefresh: opts.blockingRefresh(),
+ }
+}
+
+type cachedTokenProvider struct {
+ tp TokenProvider
+ autoRefresh bool
+ expireEarly time.Duration
+ blockingRefresh bool
+
+ mu sync.Mutex
+ cachedToken *Token
+ // isRefreshRunning ensures that the non-blocking refresh will only be
+ // attempted once, even if multiple callers enter the Token method.
+ isRefreshRunning bool
+ // isRefreshErr ensures that the non-blocking refresh will only be attempted
+ // once per refresh window if an error is encountered.
+ isRefreshErr bool
+}
+
+func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) {
+ if c.blockingRefresh {
+ return c.tokenBlocking(ctx)
+ }
+ return c.tokenNonBlocking(ctx)
+}
+
+func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, error) {
+ switch c.tokenState() {
+ case fresh:
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.cachedToken, nil
+ case stale:
+ // Call tokenAsync with a new Context because the user-provided context
+ // may have a short timeout incompatible with async token refresh.
+ c.tokenAsync(context.Background())
+ // Return the stale token immediately to not block customer requests to Cloud services.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.cachedToken, nil
+ default: // invalid
+ return c.tokenBlocking(ctx)
+ }
+}
+
+// tokenState reports the token's validity.
+func (c *cachedTokenProvider) tokenState() tokenState {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ t := c.cachedToken
+ now := timeNow()
+ if t == nil || t.Value == "" {
+ return invalid
+ } else if t.Expiry.IsZero() {
+ return fresh
+ } else if now.After(t.Expiry.Round(0)) {
+ return invalid
+ } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+ return stale
+ }
+ return fresh
+}
+
+// tokenAsync uses a bool to ensure that only one non-blocking token refresh
+// happens at a time, even if multiple callers have entered this function
+// concurrently. This avoids creating an arbitrary number of concurrent
+// goroutines. Retries should be attempted and managed within the Token method.
+// If the refresh attempt fails, no further attempts are made until the refresh
+// window expires and the token enters the invalid state, at which point the
+// blocking call to Token should likely return the same error on the main goroutine.
+func (c *cachedTokenProvider) tokenAsync(ctx context.Context) {
+ fn := func() {
+ c.mu.Lock()
+ c.isRefreshRunning = true
+ c.mu.Unlock()
+ t, err := c.tp.Token(ctx)
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.isRefreshRunning = false
+ if err != nil {
+ // Discard errors from the non-blocking refresh, but prevent further
+ // attempts.
+ c.isRefreshErr = true
+ return
+ }
+ c.cachedToken = t
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if !c.isRefreshRunning && !c.isRefreshErr {
+ go fn()
+ }
+}
+
+func (c *cachedTokenProvider) tokenBlocking(ctx context.Context) (*Token, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.isRefreshErr = false
+ if c.cachedToken.IsValid() || (!c.autoRefresh && !c.cachedToken.isEmpty()) {
+ return c.cachedToken, nil
+ }
+ t, err := c.tp.Token(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.cachedToken = t
+ return t, nil
+}
+
+// Error is an error associated with retrieving a [Token]. It can hold useful
+// additional details for debugging.
+type Error struct {
+ // Response is the HTTP response associated with the error. The body will
+ // always already be closed and consumed.
+ Response *http.Response
+ // Body is the HTTP response body.
+ Body []byte
+ // Err is the underlying wrapped error.
+ Err error
+
+ // code returned in the token response
+ code string
+ // description returned in the token response
+ description string
+ // uri returned in the token response
+ uri string
+}
+
+func (e *Error) Error() string {
+ if e.code != "" {
+ s := fmt.Sprintf("auth: %q", e.code)
+ if e.description != "" {
+ s += fmt.Sprintf(" %q", e.description)
+ }
+ if e.uri != "" {
+ s += fmt.Sprintf(" %q", e.uri)
+ }
+ return s
+ }
+ return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
+}
+
+// Temporary returns true if the error is considered temporary and the
+// request may be retried.
+func (e *Error) Temporary() bool {
+ if e.Response == nil {
+ return false
+ }
+ sc := e.Response.StatusCode
+ return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
+}
+
+func (e *Error) Unwrap() error {
+ return e.Err
+}
+
+// Style describes how the token endpoint wants to receive the ClientID and
+// ClientSecret.
+type Style int
+
+const (
+ // StyleUnknown means the value has not been initialized. Sending this in
+ // a request will cause the token exchange to fail.
+ StyleUnknown Style = iota
+ // StyleInParams sends client info in the body of a POST request.
+ StyleInParams
+ // StyleInHeader sends client info using Basic Authorization header.
+ StyleInHeader
+)
+
+// Options2LO is the configuration settings for doing a 2-legged JWT OAuth2 flow.
+type Options2LO struct {
+ // Email is the OAuth2 client ID. This value is set as the "iss" in the
+ // JWT.
+ Email string
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. It is used to sign
+ // the JWT created.
+ PrivateKey []byte
+ // TokenURL is the URL the JWT is sent to. Required.
+ TokenURL string
+ // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
+ // "kid" in the JWT header. Optional.
+ PrivateKeyID string
+ // Subject is used to impersonate a user. It is used as the "sub" in
+ // the JWT. Optional.
+ Subject string
+ // Scopes specifies requested permissions for the token. Optional.
+ Scopes []string
+ // Expires specifies the lifetime of the token. Optional.
+ Expires time.Duration
+ // Audience specifies the "aud" in the JWT. Optional.
+ Audience string
+ // PrivateClaims allows specifying any custom claims for the JWT. Optional.
+ PrivateClaims map[string]interface{}
+
+ // Client is the client to be used to make the underlying token requests.
+ // Optional.
+ Client *http.Client
+ // UseIDToken requests that the token returned be an ID token if one is
+ // returned from the server. Optional.
+ UseIDToken bool
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the logger's configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *Options2LO) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+func (o *Options2LO) validate() error {
+ if o == nil {
+ return errors.New("auth: options must be provided")
+ }
+ if o.Email == "" {
+ return errors.New("auth: email must be provided")
+ }
+ if len(o.PrivateKey) == 0 {
+ return errors.New("auth: private key must be provided")
+ }
+ if o.TokenURL == "" {
+ return errors.New("auth: token URL must be provided")
+ }
+ return nil
+}
+
+// New2LOTokenProvider returns a [TokenProvider] from the provided options.
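+//
+// An illustrative sketch; the email, key bytes, and scope below are assumed
+// placeholders:
+//
+//	tp, err := New2LOTokenProvider(&Options2LO{
+//		Email:      "svc@example.iam.gserviceaccount.com",
+//		PrivateKey: pemKeyBytes, // PEM-encoded RSA private key
+//		TokenURL:   "https://oauth2.googleapis.com/token",
+//		Scopes:     []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	})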
+func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil
+}
+
+type tokenProvider2LO struct {
+ opts *Options2LO
+ Client *http.Client
+ logger *slog.Logger
+}
+
+func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
+ pk, err := internal.ParseKey(tp.opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ claimSet := &jwt.Claims{
+ Iss: tp.opts.Email,
+ Scope: strings.Join(tp.opts.Scopes, " "),
+ Aud: tp.opts.TokenURL,
+ AdditionalClaims: tp.opts.PrivateClaims,
+ Sub: tp.opts.Subject,
+ }
+ if t := tp.opts.Expires; t > 0 {
+ claimSet.Exp = time.Now().Add(t).Unix()
+ }
+ if aud := tp.opts.Audience; aud != "" {
+ claimSet.Aud = aud
+ }
+ h := *defaultHeader
+ h.KeyID = tp.opts.PrivateKeyID
+ payload, err := jwt.EncodeJWS(&h, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ req, err := http.NewRequestWithContext(ctx, "POST", tp.opts.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+ resp, body, err := internal.DoRequest(tp.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
+ }
+ tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return nil, &Error{
+ Response: resp,
+ Body: body,
+ }
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"`
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
+ }
+ token := &Token{
+ Value: tokenRes.AccessToken,
+ Type: tokenRes.TokenType,
+ }
+ token.Metadata = make(map[string]interface{})
+ json.Unmarshal(body, &token.Metadata) // no error checks for optional fields
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jwt.DecodeJWS(v)
+ if err != nil {
+ return nil, fmt.Errorf("auth: error decoding JWT token: %w", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ if tp.opts.UseIDToken {
+ if tokenRes.IDToken == "" {
+ return nil, fmt.Errorf("auth: response doesn't have JWT token")
+ }
+ token.Value = tokenRes.IDToken
+ }
+ return token, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
new file mode 100644
index 000000000..e4a8078f8
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/compute.go
@@ -0,0 +1,102 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/compute/metadata"
+)
+
+var (
+ computeTokenMetadata = map[string]interface{}{
+ "auth.google.tokenSource": "compute-metadata",
+ "auth.google.serviceAccount": "default",
+ }
+ computeTokenURI = "instance/service-accounts/default/token"
+)
+
+// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
+// uses the metadata service to retrieve tokens.
+func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider {
+ return auth.NewCachedTokenProvider(&computeProvider{
+ scopes: opts.Scopes,
+ client: client,
+ tokenBindingType: opts.TokenBindingType,
+ }, &auth.CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenRefresh,
+ DisableAsyncRefresh: opts.DisableAsyncRefresh,
+ })
+}
+
+// computeProvider fetches tokens from the Google Cloud metadata service.
+type computeProvider struct {
+ scopes []string
+ client *metadata.Client
+ tokenBindingType TokenBindingType
+}
+
+type metadataTokenResp struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+}
+
+func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) {
+ tokenURI, err := url.Parse(computeTokenURI)
+ if err != nil {
+ return nil, err
+ }
+ hasScopes := len(cs.scopes) > 0
+ if hasScopes || cs.tokenBindingType != NoBinding {
+ v := url.Values{}
+ if hasScopes {
+ v.Set("scopes", strings.Join(cs.scopes, ","))
+ }
+ switch cs.tokenBindingType {
+ case MTLSHardBinding:
+ v.Set("transport", "mtls")
+ v.Set("binding-enforcement", "on")
+ case ALTSHardBinding:
+ v.Set("transport", "alts")
+ }
+ tokenURI.RawQuery = v.Encode()
+ }
+ tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String())
+ if err != nil {
+ return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+ }
+ var res metadataTokenResp
+ if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {
+ return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, errors.New("credentials: incomplete token received from metadata")
+ }
+ return &auth.Token{
+ Value: res.AccessToken,
+ Type: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ Metadata: computeTokenMetadata,
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
new file mode 100644
index 000000000..d8f7d9614
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/detect.go
@@ -0,0 +1,316 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "os"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/compute/metadata"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow.
+ jwtTokenURL = "https://oauth2.googleapis.com/token"
+
+ // Google's OAuth 2.0 default endpoints.
+ googleAuthURL = "https://accounts.google.com/o/oauth2/auth"
+ googleTokenURL = "https://oauth2.googleapis.com/token"
+
+ // GoogleMTLSTokenURL is Google's default OAuth2.0 mTLS endpoint.
+ GoogleMTLSTokenURL = "https://oauth2.mtls.googleapis.com/token"
+
+ // Help on default credentials
+ adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
+)
+
+var (
+ // for testing
+ allowOnGCECheck = true
+)
+
+// TokenBindingType specifies the type of binding used when requesting a
+// token: whether to request a hard-bound token using mTLS or an instance
+// identity bound token using ALTS.
+type TokenBindingType int
+
+const (
+ // NoBinding specifies that requested tokens are not required to have a
+ // binding. This is the default option.
+ NoBinding TokenBindingType = iota
+ // MTLSHardBinding specifies that a hard-bound token should be requested
+ // using an mTLS with S2A channel.
+ MTLSHardBinding
+ // ALTSHardBinding specifies that an instance identity bound token should
+ // be requested using an ALTS channel.
+ ALTSHardBinding
+)
+
+// OnGCE reports whether this process is running in Google Cloud.
+func OnGCE() bool {
+ // TODO(codyoss): once all libs use this auth lib move metadata check here
+ return allowOnGCECheck && metadata.OnGCE()
+}
+
+// DetectDefault searches for "Application Default Credentials" and returns
+// a credential based on the [DetectOptions] provided.
+//
+// It looks for credentials in the following places, preferring the first
+// location found:
+//
+// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS
+// environment variable. For workload identity federation, refer to
+// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation
+// on how to generate the JSON configuration file for on-prem/non-Google
+// cloud platforms.
+// - A JSON file in a location known to the gcloud command-line tool. On
+// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On
+// other systems, $HOME/.config/gcloud/application_default_credentials.json.
+// - On Google Compute Engine, Google App Engine standard second generation
+// runtimes, and Google App Engine flexible environment, it fetches
+// credentials from the metadata server.
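+//
+// An illustrative sketch of typical usage:
+//
+//	creds, err := DetectDefault(&DetectOptions{
+//		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	tok, err := creds.Token(ctx)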
+func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ if len(opts.CredentialsJSON) > 0 {
+ return readCredentialsFileJSON(opts.CredentialsJSON, opts)
+ }
+ if opts.CredentialsFile != "" {
+ return readCredentialsFile(opts.CredentialsFile, opts)
+ }
+ if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" {
+ creds, err := readCredentialsFile(filename, opts)
+ if err != nil {
+ return nil, err
+ }
+ return creds, nil
+ }
+
+ fileName := credsfile.GetWellKnownFileName()
+ if b, err := os.ReadFile(fileName); err == nil {
+ return readCredentialsFileJSON(b, opts)
+ }
+
+ if OnGCE() {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: opts.logger(),
+ })
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: computeTokenProvider(opts, metadataClient),
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return metadataClient.ProjectIDWithContext(ctx)
+ }),
+ UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
+ MetadataClient: metadataClient,
+ },
+ }), nil
+ }
+
+ return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL)
+}
+
+// DetectOptions provides configuration for [DetectDefault].
+type DetectOptions struct {
+ // Scopes that credentials tokens should have. Example:
+ // https://www.googleapis.com/auth/cloud-platform. Required if Audience is
+ // not provided.
+ Scopes []string
+ // TokenBindingType specifies the type of binding used when requesting a
+ // token: whether to request a hard-bound token using mTLS or an instance
+ // identity bound token using ALTS. Optional.
+ TokenBindingType TokenBindingType
+ // Audience that credentials tokens should have. Only applicable for 2LO
+ // flows with service accounts. If specified, scopes should not be provided.
+ Audience string
+ // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority).
+ // Optional.
+ Subject string
+ // EarlyTokenRefresh configures how early before a token expires that it
+ // should be refreshed. Once the token’s time until expiration has entered
+ // this refresh window the token is considered valid but stale. If unset,
+ // the default value is 3 minutes and 45 seconds. Optional.
+ EarlyTokenRefresh time.Duration
+ // DisableAsyncRefresh configures a synchronous workflow that refreshes
+ // stale tokens while blocking. The default is false. Optional.
+ DisableAsyncRefresh bool
+ // AuthHandlerOptions configures an authorization handler and other options
+ // for 3LO flows. It is required, and only used, for client credential
+ // flows.
+ AuthHandlerOptions *auth.AuthorizationHandlerOptions
+ // TokenURL allows setting the token endpoint for user credential flows. If
+ // unset the default value is: https://oauth2.googleapis.com/token.
+ // Optional.
+ TokenURL string
+ // STSAudience is the audience sent when retrieving an STS token.
+ // Currently this is only used for the GDCH auth flow, for which it is required.
+ STSAudience string
+ // CredentialsFile overrides detection logic and sources a credential file
+ // from the provided filepath. If provided, CredentialsJSON must not be.
+ // Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
+ CredentialsFile string
+ // CredentialsJSON overrides detection logic and uses the JSON bytes as the
+ // source for the credential. If provided, CredentialsFile must not be.
+ // Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
+ CredentialsJSON []byte
+ // UseSelfSignedJWT directs service account based credentials to create a
+ // self-signed JWT with the private key found in the file, skipping any
+ // network requests that would normally be made. Optional.
+ UseSelfSignedJWT bool
+ // Client configures the underlying client used to make network requests
+ // when fetching tokens. Optional.
+ Client *http.Client
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // The default value is "googleapis.com". This option is ignored for
+ // authentication flows that do not support universe domain. Optional.
+ UniverseDomain string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the logger's configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *DetectOptions) validate() error {
+ if o == nil {
+ return errors.New("credentials: options must be provided")
+ }
+ if len(o.Scopes) > 0 && o.Audience != "" {
+ return errors.New("credentials: both scopes and audience were provided")
+ }
+ if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
+ return errors.New("credentials: both credentials file and JSON were provided")
+ }
+ return nil
+}
+
+func (o *DetectOptions) tokenURL() string {
+ if o.TokenURL != "" {
+ return o.TokenURL
+ }
+ return googleTokenURL
+}
+
+func (o *DetectOptions) scopes() []string {
+ scopes := make([]string, len(o.Scopes))
+ copy(scopes, o.Scopes)
+ return scopes
+}
+
+func (o *DetectOptions) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+func (o *DetectOptions) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return readCredentialsFileJSON(b, opts)
+}
+
+func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
+ // attempt to parse b as a Google Developers Console client_credentials.json.
+ config := clientCredConfigFromJSON(b, opts)
+ if config != nil {
+ if config.AuthHandlerOpts == nil {
+ return nil, errors.New("credentials: auth handler must be specified for this credential filetype")
+ }
+ tp, err := auth.New3LOTokenProvider(config)
+ if err != nil {
+ return nil, err
+ }
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: tp,
+ JSON: b,
+ }), nil
+ }
+ return fileCredentials(b, opts)
+}
+
+func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
+ var creds credsfile.ClientCredentialsFile
+ var c *credsfile.Config3LO
+ if err := json.Unmarshal(b, &creds); err != nil {
+ return nil
+ }
+ switch {
+ case creds.Web != nil:
+ c = creds.Web
+ case creds.Installed != nil:
+ c = creds.Installed
+ default:
+ return nil
+ }
+ if len(c.RedirectURIs) < 1 {
+ return nil
+ }
+ var handleOpts *auth.AuthorizationHandlerOptions
+ if opts.AuthHandlerOptions != nil {
+ handleOpts = &auth.AuthorizationHandlerOptions{
+ Handler: opts.AuthHandlerOptions.Handler,
+ State: opts.AuthHandlerOptions.State,
+ PKCEOpts: opts.AuthHandlerOptions.PKCEOpts,
+ }
+ }
+ return &auth.Options3LO{
+ ClientID: c.ClientID,
+ ClientSecret: c.ClientSecret,
+ RedirectURL: c.RedirectURIs[0],
+ Scopes: opts.scopes(),
+ AuthURL: c.AuthURI,
+ TokenURL: c.TokenURI,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ EarlyTokenExpiry: opts.EarlyTokenRefresh,
+ AuthHandlerOpts: handleOpts,
+ // TODO(codyoss): refactor this out. We need to add in auto-detection
+ // for this use case.
+ AuthStyle: auth.StyleInParams,
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go
new file mode 100644
index 000000000..1dbb2866b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs. It supports the Web server flow,
+// client-side credentials, service accounts, Google Compute Engine service
+// accounts, Google App Engine service accounts and workload identity federation
+// from non-Google cloud platforms.
+//
+// A brief overview of the package follows. For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+// For more information on using workload identity federation, refer to
+// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation.
+//
+// # Credentials
+//
+// The [cloud.google.com/go/auth.Credentials] type represents Google
+// credentials, including Application Default Credentials.
+//
+// Use [DetectDefault] to obtain Application Default Credentials.
+//
+// Application Default Credentials support workload identity federation to
+// access Google Cloud resources from non-Google Cloud platforms including Amazon
+// Web Services (AWS), Microsoft Azure or any identity provider that supports
+// OpenID Connect (OIDC). Workload identity federation is recommended for
+// non-Google Cloud environments as it avoids the need to download, manage, and
+// store service account private keys locally.
+//
+// # Workforce Identity Federation
+//
+// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount].
+package credentials
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
new file mode 100644
index 000000000..e5243e6cf
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -0,0 +1,231 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "errors"
+ "fmt"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials/internal/externalaccount"
+ "cloud.google.com/go/auth/credentials/internal/externalaccountuser"
+ "cloud.google.com/go/auth/credentials/internal/gdch"
+ "cloud.google.com/go/auth/credentials/internal/impersonate"
+ internalauth "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+)
+
+func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
+ fileType, err := credsfile.ParseFileType(b)
+ if err != nil {
+ return nil, err
+ }
+
+ var projectID, universeDomain string
+ var tp auth.TokenProvider
+ switch fileType {
+ case credsfile.ServiceAccountKey:
+ f, err := credsfile.ParseServiceAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleServiceAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ projectID = f.ProjectID
+ universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ case credsfile.UserCredentialsKey:
+ f, err := credsfile.ParseUserCredentials(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleUserCredential(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = f.UniverseDomain
+ case credsfile.ExternalAccountKey:
+ f, err := credsfile.ParseExternalAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleExternalAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ case credsfile.ExternalAccountAuthorizedUserKey:
+ f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleExternalAccountAuthorizedUser(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = f.UniverseDomain
+ case credsfile.ImpersonatedServiceAccountKey:
+ f, err := credsfile.ParseImpersonatedServiceAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleImpersonatedServiceAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ case credsfile.GDCHServiceAccountKey:
+ f, err := credsfile.ParseGDCHServiceAccount(b)
+ if err != nil {
+ return nil, err
+ }
+ tp, err = handleGDCHServiceAccount(f, opts)
+ if err != nil {
+ return nil, err
+ }
+ projectID = f.Project
+ universeDomain = f.UniverseDomain
+ default:
+ return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType)
+ }
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenRefresh,
+ }),
+ JSON: b,
+ ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
+ // TODO(codyoss): only set quota project here if there was a user override
+ UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
+ }), nil
+}
+
+// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to
+// support configuring universe-specific credentials in code. Auth flows that
+// do not support universe domain should not use this func; they should instead
+// simply set the file universe domain on the credentials.
+func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string {
+ if optsUniverseDomain != "" {
+ return optsUniverseDomain
+ }
+ return fileUniverseDomain
+}
+
+func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
+ if opts.UseSelfSignedJWT {
+ return configureSelfSignedJWT(f, opts)
+ } else if ud != "" && ud != internalauth.DefaultUniverseDomain {
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs.
+ opts.UseSelfSignedJWT = true
+ return configureSelfSignedJWT(f, opts)
+ }
+ opts2LO := &auth.Options2LO{
+ Email: f.ClientEmail,
+ PrivateKey: []byte(f.PrivateKey),
+ PrivateKeyID: f.PrivateKeyID,
+ Scopes: opts.scopes(),
+ TokenURL: f.TokenURL,
+ Subject: opts.Subject,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ }
+ if opts2LO.TokenURL == "" {
+ opts2LO.TokenURL = jwtTokenURL
+ }
+ return auth.New2LOTokenProvider(opts2LO)
+}
+
+func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ opts3LO := &auth.Options3LO{
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: opts.scopes(),
+ AuthURL: googleAuthURL,
+ TokenURL: opts.tokenURL(),
+ AuthStyle: auth.StyleInParams,
+ EarlyTokenExpiry: opts.EarlyTokenRefresh,
+ RefreshToken: f.RefreshToken,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ }
+ return auth.New3LOTokenProvider(opts3LO)
+}
+
+func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ externalOpts := &externalaccount.Options{
+ Audience: f.Audience,
+ SubjectTokenType: f.SubjectTokenType,
+ TokenURL: f.TokenURL,
+ TokenInfoURL: f.TokenInfoURL,
+ ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL,
+ ClientSecret: f.ClientSecret,
+ ClientID: f.ClientID,
+ CredentialSource: f.CredentialSource,
+ QuotaProjectID: f.QuotaProjectID,
+ Scopes: opts.scopes(),
+ WorkforcePoolUserProject: f.WorkforcePoolUserProject,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ IsDefaultClient: opts.Client == nil,
+ }
+ if f.ServiceAccountImpersonation != nil {
+ externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
+ }
+ return externalaccount.NewTokenProvider(externalOpts)
+}
+
+func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ externalOpts := &externalaccountuser.Options{
+ Audience: f.Audience,
+ RefreshToken: f.RefreshToken,
+ TokenURL: f.TokenURL,
+ TokenInfoURL: f.TokenInfoURL,
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: opts.scopes(),
+ Client: opts.client(),
+ Logger: opts.logger(),
+ }
+ return externalaccountuser.NewTokenProvider(externalOpts)
+}
+
+func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil {
+ return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
+ }
+
+ tp, err := fileCredentials(f.CredSource, opts)
+ if err != nil {
+ return nil, err
+ }
+ return impersonate.NewTokenProvider(&impersonate.Options{
+ URL: f.ServiceAccountImpersonationURL,
+ Scopes: opts.scopes(),
+ Tp: tp,
+ Delegates: f.Delegates,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ })
+}
+
+func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ return gdch.NewTokenProvider(f, &gdch.Options{
+ STSAudience: opts.STSAudience,
+ Client: opts.client(),
+ Logger: opts.logger(),
+ })
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
new file mode 100644
index 000000000..9ecd1f64b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -0,0 +1,531 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+var (
+ // getenv aliases os.Getenv for testing
+ getenv = os.Getenv
+)
+
+const (
+ // AWS Signature Version 4 signing algorithm identifier.
+ awsAlgorithm = "AWS4-HMAC-SHA256"
+
+ // The termination string for the AWS credential scope value as defined in
+ // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
+ awsRequestType = "aws4_request"
+
+ // The AWS authorization header name for the security session token if available.
+ awsSecurityTokenHeader = "x-amz-security-token"
+
+ // The name of the header containing the session token for metadata endpoint calls.
+ awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token"
+
+ awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
+
+ awsIMDSv2SessionTTL = "300"
+
+ // The AWS authorization header name for the auto-generated date.
+ awsDateHeader = "x-amz-date"
+
+ defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
+
+ // Supported AWS configuration environment variables.
+ awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+ awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+ awsRegionEnvVar = "AWS_REGION"
+ awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+ awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+ awsTimeFormatLong = "20060102T150405Z"
+ awsTimeFormatShort = "20060102"
+ awsProviderType = "aws"
+)
+
+type awsSubjectProvider struct {
+ EnvironmentID string
+ RegionURL string
+ RegionalCredVerificationURL string
+ CredVerificationURL string
+ IMDSv2SessionTokenURL string
+ TargetResource string
+ requestSigner *awsRequestSigner
+ region string
+ securityCredentialsProvider AwsSecurityCredentialsProvider
+ reqOpts *RequestOptions
+
+ Client *http.Client
+ logger *slog.Logger
+}
+
+func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
+ // Set Defaults
+ if sp.RegionalCredVerificationURL == "" {
+ sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
+ }
+ headers := make(map[string]string)
+ if sp.shouldUseMetadataServer() {
+ awsSessionToken, err := sp.getAWSSessionToken(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ if awsSessionToken != "" {
+ headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
+ }
+ }
+
+ awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ if err != nil {
+ return "", err
+ }
+ if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+ return "", err
+ }
+ sp.requestSigner = &awsRequestSigner{
+ RegionName: sp.region,
+ AwsSecurityCredentials: awsSecurityCredentials,
+ }
+
+ // Generate the signed request to AWS STS GetCallerIdentity API.
+ // Use the required regional endpoint. Otherwise, the request will fail.
+ req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
+ if err != nil {
+ return "", err
+ }
+ // The full, canonical resource name of the workload identity pool
+ // provider, with or without the HTTPS prefix.
+ // Including this header as part of the signature is recommended to
+ // ensure data integrity.
+ if sp.TargetResource != "" {
+ req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource)
+ }
+ sp.requestSigner.signRequest(req)
+
+ /*
+ The GCP STS endpoint expects the headers to be formatted as:
+ # [
+ # {key: 'x-amz-date', value: '...'},
+ # {key: 'Authorization', value: '...'},
+ # ...
+ # ]
+ # And then serialized as:
+ # quote(json.dumps({
+ # url: '...',
+ # method: 'POST',
+ # headers: [{key: 'x-amz-date', value: '...'}, ...]
+ # }))
+ */
+
+ awsSignedReq := awsRequest{
+ URL: req.URL.String(),
+ Method: "POST",
+ }
+ for headerKey, headerList := range req.Header {
+ for _, headerValue := range headerList {
+ awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{
+ Key: headerKey,
+ Value: headerValue,
+ })
+ }
+ }
+ sort.Slice(awsSignedReq.Headers, func(i, j int) bool {
+ headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key)
+ if headerCompare == 0 {
+ return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0
+ }
+ return headerCompare < 0
+ })
+
+ result, err := json.Marshal(awsSignedReq)
+ if err != nil {
+ return "", err
+ }
+ return url.QueryEscape(string(result)), nil
+}
+
+func (sp *awsSubjectProvider) providerType() string {
+ if sp.securityCredentialsProvider != nil {
+ return programmaticProviderType
+ }
+ return awsProviderType
+}
+
+func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) {
+ if sp.IMDSv2SessionTokenURL == "" {
+ return "", nil
+ }
+ req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil)
+ if err != nil {
+ return "", err
+ }
+ req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
+
+ sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", err
+ }
+ sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
+ }
+ return string(body), nil
+}
+
+func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) {
+ if sp.securityCredentialsProvider != nil {
+ return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts)
+ }
+ if canRetrieveRegionFromEnvironment() {
+ if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" {
+ return envAwsRegion, nil
+ }
+ return getenv(awsDefaultRegionEnvVar), nil
+ }
+
+ if sp.RegionURL == "" {
+ return "", errors.New("credentials: unable to determine AWS region")
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil)
+ if err != nil {
+ return "", err
+ }
+
+ for name, value := range headers {
+ req.Header.Add(name, value)
+ }
+ sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", err
+ }
+ sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
+ }
+
+ // This endpoint returns the availability zone, e.g. us-east-2b.
+ // Only the us-east-2 region part should be used.
+ bodyLen := len(body)
+ if bodyLen == 0 {
+ return "", nil
+ }
+ return string(body[:bodyLen-1]), nil
+}
+
+func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
+ if sp.securityCredentialsProvider != nil {
+ return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
+ }
+ if canRetrieveSecurityCredentialFromEnvironment() {
+ return &AwsSecurityCredentials{
+ AccessKeyID: getenv(awsAccessKeyIDEnvVar),
+ SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
+ SessionToken: getenv(awsSessionTokenEnvVar),
+ }, nil
+ }
+
+ roleName, err := sp.getMetadataRoleName(ctx, headers)
+ if err != nil {
+ return
+ }
+ credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
+ if err != nil {
+ return
+ }
+
+ if credentials.AccessKeyID == "" {
+ return result, errors.New("credentials: missing AccessKeyId credential")
+ }
+ if credentials.SecretAccessKey == "" {
+ return result, errors.New("credentials: missing SecretAccessKey credential")
+ }
+
+ return credentials, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
+ var result *AwsSecurityCredentials
+
+ req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
+ if err != nil {
+ return result, err
+ }
+ for name, value := range headers {
+ req.Header.Add(name, value)
+ }
+ sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return result, err
+ }
+ sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
+ }
+ if err := json.Unmarshal(body, &result); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
+ if sp.CredVerificationURL == "" {
+ return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
+ }
+ req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
+ if err != nil {
+ return "", err
+ }
+ for name, value := range headers {
+ req.Header.Add(name, value)
+ }
+
+ sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", err
+ }
+ sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body))
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
+ }
+ return string(body), nil
+}
+
+// awsRequestSigner is a utility type to sign http requests using an AWS V4 signature.
+type awsRequestSigner struct {
+ RegionName string
+ AwsSecurityCredentials *AwsSecurityCredentials
+}
+
+// signRequest adds the appropriate headers to an http.Request
+// or returns an error if something prevented this.
+func (rs *awsRequestSigner) signRequest(req *http.Request) error {
+ // req is assumed non-nil
+ signedRequest := cloneRequest(req)
+ timestamp := Now()
+ signedRequest.Header.Set("host", requestHost(req))
+ if rs.AwsSecurityCredentials.SessionToken != "" {
+ signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken)
+ }
+ if signedRequest.Header.Get("date") == "" {
+ signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong))
+ }
+ authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp)
+ if err != nil {
+ return err
+ }
+ signedRequest.Header.Set("Authorization", authorizationCode)
+ req.Header = signedRequest.Header
+ return nil
+}
+
+func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) {
+ canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req)
+ dateStamp := timestamp.Format(awsTimeFormatShort)
+ serviceName := ""
+
+ if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 {
+ serviceName = splitHost[0]
+ }
+ credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/")
+ requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData)
+ if err != nil {
+ return "", err
+ }
+ requestHash, err := getSha256([]byte(requestString))
+ if err != nil {
+ return "", err
+ }
+
+ stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n")
+ signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey)
+ for _, signingInput := range []string{
+ dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign,
+ } {
+ signingKey, err = getHmacSha256(signingKey, []byte(signingInput))
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil
+}
+
+func getSha256(input []byte) (string, error) {
+ hash := sha256.New()
+ if _, err := hash.Write(input); err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+func getHmacSha256(key, input []byte) ([]byte, error) {
+ hash := hmac.New(sha256.New, key)
+ if _, err := hash.Write(input); err != nil {
+ return nil, err
+ }
+ return hash.Sum(nil), nil
+}
+
+func cloneRequest(r *http.Request) *http.Request {
+ r2 := new(http.Request)
+ *r2 = *r
+ if r.Header != nil {
+ r2.Header = make(http.Header, len(r.Header))
+
+ // Find total number of values.
+ headerCount := 0
+ for _, headerValues := range r.Header {
+ headerCount += len(headerValues)
+ }
+ copiedHeaders := make([]string, headerCount) // shared backing array for headers' values
+
+ for headerKey, headerValues := range r.Header {
+ headerCount = copy(copiedHeaders, headerValues)
+ r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount]
+ copiedHeaders = copiedHeaders[headerCount:]
+ }
+ }
+ return r2
+}
+
+func canonicalPath(req *http.Request) string {
+ result := req.URL.EscapedPath()
+ if result == "" {
+ return "/"
+ }
+ return path.Clean(result)
+}
+
+func canonicalQuery(req *http.Request) string {
+ queryValues := req.URL.Query()
+ for queryKey := range queryValues {
+ sort.Strings(queryValues[queryKey])
+ }
+ return queryValues.Encode()
+}
+
+func canonicalHeaders(req *http.Request) (string, string) {
+ // Header keys need to be sorted alphabetically.
+ var headers []string
+ lowerCaseHeaders := make(http.Header)
+ for k, v := range req.Header {
+ k := strings.ToLower(k)
+ if _, ok := lowerCaseHeaders[k]; ok {
+ // include additional values
+ lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...)
+ } else {
+ headers = append(headers, k)
+ lowerCaseHeaders[k] = v
+ }
+ }
+ sort.Strings(headers)
+
+ var fullHeaders bytes.Buffer
+ for _, header := range headers {
+ headerValue := strings.Join(lowerCaseHeaders[header], ",")
+ fullHeaders.WriteString(header)
+ fullHeaders.WriteRune(':')
+ fullHeaders.WriteString(headerValue)
+ fullHeaders.WriteRune('\n')
+ }
+
+ return strings.Join(headers, ";"), fullHeaders.String()
+}
+
+func requestDataHash(req *http.Request) (string, error) {
+ var requestData []byte
+ if req.Body != nil {
+ requestBody, err := req.GetBody()
+ if err != nil {
+ return "", err
+ }
+ defer requestBody.Close()
+
+ requestData, err = internal.ReadAll(requestBody)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ return getSha256(requestData)
+}
+
+func requestHost(req *http.Request) string {
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
+
+func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) {
+ dataHash, err := requestDataHash(req)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil
+}
+
+type awsRequestHeader struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+}
+
+type awsRequest struct {
+ URL string `json:"url"`
+ Method string `json:"method"`
+ Headers []awsRequestHeader `json:"headers"`
+}
+
+// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
+// required.
+func canRetrieveRegionFromEnvironment() bool {
+ return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != ""
+}
+
+// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
+func canRetrieveSecurityCredentialFromEnvironment() bool {
+ return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != ""
+}
+
+func (sp *awsSubjectProvider) shouldUseMetadataServer() bool {
+ return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
+}
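For context on the signing loop in generateAuthentication above, here is a minimal, self-contained sketch of the SigV4 key-derivation chain using only the standard library. All input values are hypothetical examples; folding the string-to-sign into the same HMAC chain, as the vendored code does, is equivalent to deriving the signing key first and then signing with it.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// hmacSHA256 mirrors getHmacSha256 above, minus the error plumbing.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "EXAMPLESECRET" // hypothetical AWS secret access key
	stringToSign := "AWS4-HMAC-SHA256\n20240101T000000Z\n20240101/us-east-1/sts/aws4_request\n<request-hash>"

	// Start from "AWS4" + secret and fold in each credential-scope
	// component; appending the string-to-sign to the chain yields the
	// final signature directly.
	key := []byte("AWS4" + secret)
	for _, part := range []string{"20240101", "us-east-1", "sts", "aws4_request", stringToSign} {
		key = hmacSHA256(key, []byte(part))
	}
	fmt.Println("signature:", hex.EncodeToString(key))
}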
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
new file mode 100644
index 000000000..d5765c474
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
@@ -0,0 +1,284 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "os/exec"
+ "regexp"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+)
+
+const (
+ executableSupportedMaxVersion = 1
+ executableDefaultTimeout = 30 * time.Second
+ executableSource = "response"
+ executableProviderType = "executable"
+ outputFileSource = "output file"
+
+ allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"
+
+ jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
+ idTokenType = "urn:ietf:params:oauth:token-type:id_token"
+ saml2TokenType = "urn:ietf:params:oauth:token-type:saml2"
+)
+
+var (
+ serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`)
+)
+
+type nonCacheableError struct {
+ message string
+}
+
+func (nce nonCacheableError) Error() string {
+ return nce.message
+}
+
+// environment is a contract for testing
+type environment interface {
+ existingEnv() []string
+ getenv(string) string
+ run(ctx context.Context, command string, env []string) ([]byte, error)
+ now() time.Time
+}
+
+type runtimeEnvironment struct{}
+
+func (r runtimeEnvironment) existingEnv() []string {
+ return os.Environ()
+}
+func (r runtimeEnvironment) getenv(key string) string {
+ return os.Getenv(key)
+}
+func (r runtimeEnvironment) now() time.Time {
+ return time.Now().UTC()
+}
+
+func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) {
+ splitCommand := strings.Fields(command)
+ cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...)
+ cmd.Env = env
+
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ if ctx.Err() == context.DeadlineExceeded {
+ return nil, context.DeadlineExceeded
+ }
+ if exitError, ok := err.(*exec.ExitError); ok {
+ return nil, exitCodeError(exitError)
+ }
+ return nil, executableError(err)
+ }
+
+ bytesStdout := bytes.TrimSpace(stdout.Bytes())
+ if len(bytesStdout) > 0 {
+ return bytesStdout, nil
+ }
+ return bytes.TrimSpace(stderr.Bytes()), nil
+}
+
+type executableSubjectProvider struct {
+ Command string
+ Timeout time.Duration
+ OutputFile string
+ client *http.Client
+ opts *Options
+ env environment
+}
+
+type executableResponse struct {
+ Version int `json:"version,omitempty"`
+ Success *bool `json:"success,omitempty"`
+ TokenType string `json:"token_type,omitempty"`
+ ExpirationTime int64 `json:"expiration_time,omitempty"`
+ IDToken string `json:"id_token,omitempty"`
+ SamlResponse string `json:"saml_response,omitempty"`
+ Code string `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
+ var result executableResponse
+ if err := json.Unmarshal(response, &result); err != nil {
+ return "", jsonParsingError(source, string(response))
+ }
+ // Validate
+ if result.Version == 0 {
+ return "", missingFieldError(source, "version")
+ }
+ if result.Success == nil {
+ return "", missingFieldError(source, "success")
+ }
+ if !*result.Success {
+ if result.Code == "" || result.Message == "" {
+ return "", malformedFailureError()
+ }
+ return "", userDefinedError(result.Code, result.Message)
+ }
+ if result.Version > executableSupportedMaxVersion || result.Version < 0 {
+ return "", unsupportedVersionError(source, result.Version)
+ }
+ if result.ExpirationTime == 0 && sp.OutputFile != "" {
+ return "", missingFieldError(source, "expiration_time")
+ }
+ if result.TokenType == "" {
+ return "", missingFieldError(source, "token_type")
+ }
+ if result.ExpirationTime != 0 && result.ExpirationTime < now {
+ return "", tokenExpiredError()
+ }
+
+ switch result.TokenType {
+ case jwtTokenType, idTokenType:
+ if result.IDToken == "" {
+ return "", missingFieldError(source, "id_token")
+ }
+ return result.IDToken, nil
+ case saml2TokenType:
+ if result.SamlResponse == "" {
+ return "", missingFieldError(source, "saml_response")
+ }
+ return result.SamlResponse, nil
+ default:
+ return "", tokenTypeError(source)
+ }
+}
+
+func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) {
+ if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil {
+ return token, err
+ }
+ return sp.getTokenFromExecutableCommand(ctx)
+}
+
+func (sp *executableSubjectProvider) providerType() string {
+ return executableProviderType
+}
+
+func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) {
+ if sp.OutputFile == "" {
+ // This ExecutableCredentialSource doesn't use an OutputFile.
+ return "", nil
+ }
+
+ file, err := os.Open(sp.OutputFile)
+ if err != nil {
+ // No OutputFile found. Hasn't been created yet, so skip it.
+ return "", nil
+ }
+ defer file.Close()
+
+ data, err := internal.ReadAll(file)
+ if err != nil || len(data) == 0 {
+ // Cache file exists, but no data found. Get new credential.
+ return "", nil
+ }
+
+ token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix())
+ if err != nil {
+ if _, ok := err.(nonCacheableError); ok {
+ // If the cached token is expired we need a new token,
+ // and if the cache contains a failure, we need to try again.
+ return "", nil
+ }
+
+ // There was an error in the cached token, and the developer should be aware of it.
+ return "", err
+ }
+ // Token parsing succeeded. Use found token.
+ return token, nil
+}
+
+func (sp *executableSubjectProvider) executableEnvironment() []string {
+ result := sp.env.existingEnv()
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience))
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType))
+ result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0")
+ if sp.opts.ServiceAccountImpersonationURL != "" {
+ matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL)
+ if matches != nil {
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1]))
+ }
+ }
+ if sp.OutputFile != "" {
+ result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile))
+ }
+ return result
+}
+
+func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) {
+ // For security reasons, we need our consumers to set this environment variable to allow executables to be run.
+ if sp.env.getenv(allowExecutablesEnvVar) != "1" {
+ return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
+ }
+
+ ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
+ defer cancel()
+
+ output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
+ if err != nil {
+ return "", err
+ }
+ return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
+}
+
+func missingFieldError(source, field string) error {
+ return fmt.Errorf("credentials: %q missing %q field", source, field)
+}
+
+func jsonParsingError(source, data string) error {
+ return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
+}
+
+func malformedFailureError() error {
+ return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"}
+}
+
+func userDefinedError(code, message string) error {
+ return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
+}
+
+func unsupportedVersionError(source string, version int) error {
+ return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
+}
+
+func tokenExpiredError() error {
+ return nonCacheableError{"credentials: the token returned by the executable is expired"}
+}
+
+func tokenTypeError(source string) error {
+ return fmt.Errorf("credentials: %v contains unsupported token type", source)
+}
+
+func exitCodeError(err *exec.ExitError) error {
+ return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
+}
+
+func executableError(err error) error {
+ return fmt.Errorf("credentials: executable command failed: %w", err)
+}
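As a sketch of what parseSubjectTokenFromSource above expects, the following emits the kind of JSON document a credential executable would print to stdout. The token value and expiry are hypothetical placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Shape validated by parseSubjectTokenFromSource: version and success
	// are required, token_type selects which token field must be present,
	// and expiration_time is required when an output file is configured.
	resp := map[string]any{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "eyJhbGciOiJSUzI1NiJ9.example", // hypothetical token
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	out, err := json.Marshal(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}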
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
new file mode 100644
index 000000000..a82206423
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -0,0 +1,428 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials/internal/impersonate"
+ "cloud.google.com/go/auth/credentials/internal/stsexchange"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ timeoutMinimum = 5 * time.Second
+ timeoutMaximum = 120 * time.Second
+
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
+ defaultUniverseDomain = "googleapis.com"
+)
+
+var (
+ // Now aliases time.Now for testing
+ Now = func() time.Time {
+ return time.Now().UTC()
+ }
+ validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+// Options stores the configuration for fetching tokens with external credentials.
+type Options struct {
+ // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
+ // identity pool or the workforce pool and the provider identifier in that pool.
+ Audience string
+ // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec
+ // e.g. `urn:ietf:params:oauth:token-type:jwt`.
+ SubjectTokenType string
+ // TokenURL is the STS token exchange endpoint.
+ TokenURL string
+ // TokenInfoURL is the token_info endpoint used to retrieve account-related
+ // information (user attributes such as the account identifier, e.g. email,
+ // username, uid). This is needed for gCloud session account identification.
+ TokenInfoURL string
+ // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+ // required for workload identity pools when APIs to be accessed have not integrated with UberMint.
+ ServiceAccountImpersonationURL string
+ // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+ // token will be valid for.
+ ServiceAccountImpersonationLifetimeSeconds int
+ // ClientSecret is currently only required if token_info endpoint also
+ // needs to be called with the generated GCP access token. When provided, STS will be
+ // called with additional basic authentication using client_id as username and client_secret as password.
+ ClientSecret string
+ // ClientID is only required in conjunction with ClientSecret, as described above.
+ ClientID string
+ // CredentialSource contains the necessary information to retrieve the token itself, as well
+ // as some environmental information.
+ CredentialSource *credsfile.CredentialSource
+ // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
+ // will set the x-goog-user-project header, which overrides the project associated with the credentials.
+ QuotaProjectID string
+ // Scopes contains the desired scopes for the returned access token.
+ Scopes []string
+ // WorkforcePoolUserProject should be set when it is a workforce pool and
+ // not a workload identity pool. The underlying principal must still have
+ // serviceusage.services.use IAM permission to use the project for
+ // billing/quota. Optional.
+ WorkforcePoolUserProject string
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // This value will be used in the default STS token URL. The default value
+ // is "googleapis.com". It will not be used if TokenURL is set. Optional.
+ UniverseDomain string
+ // SubjectTokenProvider is an optional token provider for OIDC/SAML
+ // credentials. One of SubjectTokenProvider, AwsSecurityCredentialsProvider
+ // or CredentialSource must be provided. Optional.
+ SubjectTokenProvider SubjectTokenProvider
+ // AwsSecurityCredentialsProvider is an AWS Security Credential provider
+ // for AWS credentials. One of SubjectTokenProvider,
+ // AwsSecurityCredentialsProvider or CredentialSource must be provided. Optional.
+ AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
+ // Client for token request.
+ Client *http.Client
+ // IsDefaultClient marks whether the client passed in is a default client that can be overridden.
+ // This is important for X509 credentials which should create a new client if the default was used
+ // but should respect a client explicitly passed in by the user.
+ IsDefaultClient bool
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+// SubjectTokenProvider can be used to supply a subject token to exchange for a
+// GCP access token.
+type SubjectTokenProvider interface {
+ // SubjectToken should return a valid subject token or an error.
+ // The external account token provider does not cache the returned subject
+ // token, so caching logic should be implemented in the provider to prevent
+ // multiple requests for the same subject token.
+ SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
+}
+
+// RequestOptions contains information about the requested subject token or AWS
+// security credentials from the Google external account credential.
+type RequestOptions struct {
+ // Audience is the requested audience for the external account credential.
+ Audience string
+ // Subject token type is the requested subject token type for the external
+ // account credential. Expected values include:
+ // “urn:ietf:params:oauth:token-type:jwt”
+ // “urn:ietf:params:oauth:token-type:id-token”
+ // “urn:ietf:params:oauth:token-type:saml2”
+ // “urn:ietf:params:aws:token-type:aws4_request”
+ SubjectTokenType string
+}
+
+// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
+// and an AWS Region to exchange for a GCP access token.
+type AwsSecurityCredentialsProvider interface {
+ // AwsRegion should return the AWS region or an error.
+ AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
+ // GetAwsSecurityCredentials should return a valid set of
+ // AwsSecurityCredentials or an error. The external account token provider
+ // does not cache the returned security credentials, so caching logic should
+ // be implemented in the provider to prevent multiple requests for the
+ // same security credentials.
+ AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
+}
+
+// AwsSecurityCredentials models AWS security credentials.
+type AwsSecurityCredentials struct {
+ // AccessKeyId is the AWS Access Key ID - Required.
+ AccessKeyID string `json:"AccessKeyID"`
+ // SecretAccessKey is the AWS Secret Access Key - Required.
+ SecretAccessKey string `json:"SecretAccessKey"`
+ // SessionToken is the AWS Session token. This should be provided for
+ // temporary AWS security credentials - Optional.
+ SessionToken string `json:"Token"`
+}
+
+func (o *Options) validate() error {
+ if o.Audience == "" {
+ return fmt.Errorf("externalaccount: Audience must be set")
+ }
+ if o.SubjectTokenType == "" {
+ return fmt.Errorf("externalaccount: Subject token type must be set")
+ }
+ if o.WorkforcePoolUserProject != "" {
+ if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid {
+ return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials")
+ }
+ }
+ count := 0
+ if o.CredentialSource != nil {
+ count++
+ }
+ if o.SubjectTokenProvider != nil {
+ count++
+ }
+ if o.AwsSecurityCredentialsProvider != nil {
+ count++
+ }
+ if count == 0 {
+ return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
+ }
+ if count > 1 {
+ return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
+ }
+ return nil
+}
+
+// client returns the http client that should be used for the token exchange. If a non-default client
+// is provided, then the client configured in the options will always be returned. If a default client
+// is provided and the options are configured for X509 credentials, a new client will be created.
+func (o *Options) client() (*http.Client, error) {
+ // If a client was provided and no override certificate config location was provided, use the provided client.
+ if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") {
+ return o.Client, nil
+ }
+
+ // If a new client should be created, validate and use the certificate source to create a new mTLS client.
+ cert := o.CredentialSource.Certificate
+ if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+ return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+ }
+ if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+ return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+ }
+ return createX509Client(cert.CertificateConfigLocation)
+}
+
+// resolveTokenURL sets the default STS token endpoint with the configured
+// universe domain.
+func (o *Options) resolveTokenURL() {
+ if o.TokenURL != "" {
+ return
+ } else if o.UniverseDomain != "" {
+ o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1)
+ } else {
+ o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
+ }
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
+// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ opts.resolveTokenURL()
+ logger := internallog.New(opts.Logger)
+ stp, err := newSubjectTokenProvider(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := opts.client()
+ if err != nil {
+ return nil, err
+ }
+
+ tp := &tokenProvider{
+ client: client,
+ opts: opts,
+ stp: stp,
+ logger: logger,
+ }
+
+ if opts.ServiceAccountImpersonationURL == "" {
+ return auth.NewCachedTokenProvider(tp, nil), nil
+ }
+
+ scopes := make([]string, len(opts.Scopes))
+ copy(scopes, opts.Scopes)
+ // needed for impersonation
+ tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
+ imp, err := impersonate.NewTokenProvider(&impersonate.Options{
+ Client: client,
+ URL: opts.ServiceAccountImpersonationURL,
+ Scopes: scopes,
+ Tp: auth.NewCachedTokenProvider(tp, nil),
+ TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
+ Logger: logger,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return auth.NewCachedTokenProvider(imp, nil), nil
+}
+
+type subjectTokenProvider interface {
+ subjectToken(ctx context.Context) (string, error)
+ providerType() string
+}
+
+// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
+type tokenProvider struct {
+ client *http.Client
+ logger *slog.Logger
+ opts *Options
+ stp subjectTokenProvider
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+ subjectToken, err := tp.stp.subjectToken(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ stsRequest := &stsexchange.TokenRequest{
+ GrantType: stsexchange.GrantType,
+ Audience: tp.opts.Audience,
+ Scope: tp.opts.Scopes,
+ RequestedTokenType: stsexchange.TokenType,
+ SubjectToken: subjectToken,
+ SubjectTokenType: tp.opts.SubjectTokenType,
+ }
+ header := make(http.Header)
+ header.Set("Content-Type", "application/x-www-form-urlencoded")
+ header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp))
+ clientAuth := stsexchange.ClientAuthentication{
+ AuthStyle: auth.StyleInHeader,
+ ClientID: tp.opts.ClientID,
+ ClientSecret: tp.opts.ClientSecret,
+ }
+ var options map[string]interface{}
+ // Do not pass workforce_pool_user_project when client authentication is used.
+ // The client ID is sufficient for determining the user project.
+ if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" {
+ options = map[string]interface{}{
+ "userProject": tp.opts.WorkforcePoolUserProject,
+ }
+ }
+ stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{
+ Client: tp.client,
+ Endpoint: tp.opts.TokenURL,
+ Request: stsRequest,
+ Authentication: clientAuth,
+ Headers: header,
+ ExtraOpts: options,
+ Logger: tp.logger,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ tok := &auth.Token{
+ Value: stsResp.AccessToken,
+ Type: stsResp.TokenType,
+ }
+ // RFC 8693 does not define behavior for an explicit 0 in the "expires_in"
+ // field, so any non-positive value is treated as invalid here.
+ if stsResp.ExpiresIn <= 0 {
+ return nil, fmt.Errorf("credentials: got invalid expiry from security token service")
+ }
+ tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
+ return tok, nil
+}
+
+// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
+// subjectTokenProvider
+func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
+ logger := internallog.New(o.Logger)
+ reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
+ if o.AwsSecurityCredentialsProvider != nil {
+ return &awsSubjectProvider{
+ securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
+ TargetResource: o.Audience,
+ reqOpts: reqOpts,
+ logger: logger,
+ }, nil
+ } else if o.SubjectTokenProvider != nil {
+ return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
+ } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" {
+ if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil {
+ if awsVersion != 1 {
+ return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion)
+ }
+
+ awsProvider := &awsSubjectProvider{
+ EnvironmentID: o.CredentialSource.EnvironmentID,
+ RegionURL: o.CredentialSource.RegionURL,
+ RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL,
+ CredVerificationURL: o.CredentialSource.URL,
+ TargetResource: o.Audience,
+ Client: o.Client,
+ logger: logger,
+ }
+ if o.CredentialSource.IMDSv2SessionTokenURL != "" {
+ awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
+ }
+
+ return awsProvider, nil
+ }
+ } else if o.CredentialSource.File != "" {
+ return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
+ } else if o.CredentialSource.URL != "" {
+ return &urlSubjectProvider{
+ URL: o.CredentialSource.URL,
+ Headers: o.CredentialSource.Headers,
+ Format: o.CredentialSource.Format,
+ Client: o.Client,
+ Logger: logger,
+ }, nil
+ } else if o.CredentialSource.Executable != nil {
+ ec := o.CredentialSource.Executable
+ if ec.Command == "" {
+ return nil, errors.New("credentials: missing `command` field — executable command must be provided")
+ }
+
+ execProvider := &executableSubjectProvider{}
+ execProvider.Command = ec.Command
+ if ec.TimeoutMillis == 0 {
+ execProvider.Timeout = executableDefaultTimeout
+ } else {
+ execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond
+ if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum {
+ return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds())
+ }
+ }
+ execProvider.OutputFile = ec.OutputFile
+ execProvider.client = o.Client
+ execProvider.opts = o
+ execProvider.env = runtimeEnvironment{}
+ return execProvider, nil
+ } else if o.CredentialSource.Certificate != nil {
+ cert := o.CredentialSource.Certificate
+ if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+ return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+ }
+ if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+ return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+ }
+ return &x509Provider{}, nil
+ }
+ return nil, errors.New("credentials: unable to parse credential source")
+}
+
+func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string {
+ return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
+ goVersion(),
+ "unknown",
+ p.providerType(),
+ conf.ServiceAccountImpersonationURL != "",
+ conf.ServiceAccountImpersonationLifetimeSeconds != 0)
+}
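To illustrate the programmatic AWS path that validate and newSubjectTokenProvider above accept, here is a hedged sketch of an AwsSecurityCredentialsProvider implementation, written as if it lived alongside this package (the interface shown is internal; the public mirror of these types lives in cloud.google.com/go/auth/credentials/externalaccount). All credential values are placeholders.

package externalaccount

import "context"

// staticAwsProvider is a hypothetical provider returning fixed values;
// real implementations would fetch and cache live credentials, since the
// external account token provider does not cache them.
type staticAwsProvider struct{}

func (staticAwsProvider) AwsRegion(ctx context.Context, opts *RequestOptions) (string, error) {
	return "us-east-1", nil
}

func (staticAwsProvider) AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error) {
	return &AwsSecurityCredentials{
		AccessKeyID:     "AKIDEXAMPLE",   // placeholder
		SecretAccessKey: "SECRETEXAMPLE", // placeholder
		SessionToken:    "",              // optional; set for temporary credentials
	}, nil
}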
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
new file mode 100644
index 000000000..8186939fe
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
@@ -0,0 +1,78 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+)
+
+const (
+ fileProviderType = "file"
+)
+
+type fileSubjectProvider struct {
+ File string
+ Format *credsfile.Format
+}
+
+func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) {
+ tokenFile, err := os.Open(sp.File)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err)
+ }
+ defer tokenFile.Close()
+ tokenBytes, err := internal.ReadAll(tokenFile)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to read credential file: %w", err)
+ }
+ tokenBytes = bytes.TrimSpace(tokenBytes)
+
+ if sp.Format == nil {
+ return string(tokenBytes), nil
+ }
+ switch sp.Format.Type {
+ case fileTypeJSON:
+ jsonData := make(map[string]interface{})
+ err = json.Unmarshal(tokenBytes, &jsonData)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
+ }
+ val, ok := jsonData[sp.Format.SubjectTokenFieldName]
+ if !ok {
+ return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
+ }
+ token, ok := val.(string)
+ if !ok {
+ return "", errors.New("credentials: improperly formatted subject token")
+ }
+ return token, nil
+ case fileTypeText:
+ return string(tokenBytes), nil
+ default:
+ return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
+ }
+}
+
+func (sp *fileSubjectProvider) providerType() string {
+ return fileProviderType
+}
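A small, self-contained sketch of the two credential_source file formats subjectToken handles above: raw text, or JSON with a subject_token_field_name. The path and field name below are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	path := filepath.Join(os.TempDir(), "subject-token.json")
	// JSON-format source: a single named field carries the token.
	if err := os.WriteFile(path, []byte(`{"access_token":"eyJexample"}`), 0o600); err != nil {
		panic(err)
	}

	data, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	var doc map[string]any
	if err := json.Unmarshal(data, &doc); err != nil {
		panic(err)
	}
	// Mirrors the field lookup and type assertion in the provider above.
	token, ok := doc["access_token"].(string)
	fmt.Println(token, ok)
}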
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
new file mode 100644
index 000000000..8e4b4379b
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
@@ -0,0 +1,74 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "runtime"
+ "strings"
+ "unicode"
+)
+
+var (
+ // version is a package internal global variable for testing purposes.
+ version = runtime.Version
+)
+
+// versionUnknown is only used when the runtime version cannot be determined.
+const versionUnknown = "UNKNOWN"
+
+// goVersion returns a Go runtime version derived from the runtime environment
+// that is modified to be suitable for reporting in a header, meaning it has no
+// whitespace. If it is unable to determine the Go runtime version, it returns
+// versionUnknown.
+func goVersion() string {
+ const develPrefix = "devel +"
+
+ s := version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+
+ notSemverRune := func(r rune) bool {
+ return !strings.ContainsRune("0123456789.", r)
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ // Some release candidates already have a dash in them.
+ if !strings.HasPrefix(prerelease, "-") {
+ prerelease = "-" + prerelease
+ }
+ s += prerelease
+ }
+ return s
+ }
+ return versionUnknown
+}
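For reference, a few worked examples of the normalization goVersion performs, assuming the runtime reports the string on the left:

    go1.21.5             -> 1.21.5
    go1.22               -> 1.22.0      (missing patch component padded)
    go1.23rc1            -> 1.23.0-rc1  (padded, prerelease separated with a dash)
    devel +abc123 linux  -> abc123
    anything else        -> UNKNOWN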
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
new file mode 100644
index 000000000..be3c87351
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
@@ -0,0 +1,30 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import "context"
+
+type programmaticProvider struct {
+ opts *RequestOptions
+ stp SubjectTokenProvider
+}
+
+func (pp *programmaticProvider) providerType() string {
+ return programmaticProviderType
+}
+
+func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) {
+ return pp.stp.SubjectToken(ctx, pp.opts)
+}
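A hedged sketch of a SubjectTokenProvider that programmaticProvider above would delegate to, written as if it lived alongside this package. The token and error text are illustrative, and caching is the implementer's responsibility per the interface docs.

package externalaccount

import (
	"context"
	"errors"
)

// staticTokenProvider is a hypothetical implementation returning a
// pre-fetched OIDC token; real providers would mint or refresh one here.
type staticTokenProvider struct{ token string }

func (s staticTokenProvider) SubjectToken(ctx context.Context, opts *RequestOptions) (string, error) {
	if s.token == "" {
		return "", errors.New("no subject token configured")
	}
	return s.token, nil
}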
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
new file mode 100644
index 000000000..754ecf4fe
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
@@ -0,0 +1,93 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ fileTypeText = "text"
+ fileTypeJSON = "json"
+ urlProviderType = "url"
+ programmaticProviderType = "programmatic"
+ x509ProviderType = "x509"
+)
+
+type urlSubjectProvider struct {
+ URL string
+ Headers map[string]string
+ Format *credsfile.Format
+ Client *http.Client
+ Logger *slog.Logger
+}
+
+func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil)
+ if err != nil {
+ return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err)
+ }
+
+ for key, val := range sp.Headers {
+ req.Header.Add(key, val)
+ }
+ sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil))
+ resp, body, err := internal.DoRequest(sp.Client, req)
+ if err != nil {
+ return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
+ }
+ sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return "", fmt.Errorf("credentials: status code %d: %s", c, body)
+ }
+
+ if sp.Format == nil {
+ return string(body), nil
+ }
+ switch sp.Format.Type {
+ case "json":
+ jsonData := make(map[string]interface{})
+ err = json.Unmarshal(body, &jsonData)
+ if err != nil {
+ return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
+ }
+ val, ok := jsonData[sp.Format.SubjectTokenFieldName]
+ if !ok {
+ return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
+ }
+ token, ok := val.(string)
+ if !ok {
+ return "", errors.New("credentials: improperly formatted subject token")
+ }
+ return token, nil
+ case fileTypeText:
+ return string(body), nil
+ default:
+ return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
+ }
+}
+
+func (sp *urlSubjectProvider) providerType() string {
+ return urlProviderType
+}
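For a concrete picture of the URL-sourced flow above, a self-contained sketch of the kind of endpoint such a credential points at, using httptest. The id_token field name is an assumption and would be wired through credential_source.format.subject_token_field_name.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stand-in for the identity provider endpoint named in credential_source.url.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"id_token":"eyJexample"}`) // hypothetical subject token
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}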
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
new file mode 100644
index 000000000..115df5881
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
@@ -0,0 +1,63 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth/internal/transport/cert"
+)
+
+// x509Provider implements the subjectTokenProvider type for
+// x509 workload identity credentials. Because x509 credentials
+// rely on an mTLS connection to represent the 3rd party identity
+// rather than a subject token, this provider will always return
+// an empty string when a subject token is requested by the external account
+// token provider.
+type x509Provider struct {
+}
+
+func (xp *x509Provider) providerType() string {
+ return x509ProviderType
+}
+
+func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) {
+ return "", nil
+}
+
+// createX509Client creates a new client that is configured with mTLS, using the
+// certificate configuration specified in the credential source.
+func createX509Client(certificateConfigLocation string) (*http.Client, error) {
+ certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation)
+ if err != nil {
+ return nil, err
+ }
+ trans := http.DefaultTransport.(*http.Transport).Clone()
+
+ trans.TLSClientConfig = &tls.Config{
+ GetClientCertificate: certProvider,
+ }
+
+ // Create a client with default settings plus the X509 workload cert and key.
+ client := &http.Client{
+ Transport: trans,
+ Timeout: 30 * time.Second,
+ }
+
+ return client, nil
+}
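As a conceptual sketch of what createX509Client builds: an HTTP client whose workload identity rides on the mTLS handshake, which is why subjectToken above returns an empty string. The certificate paths are placeholders; in the real provider the callback comes from cert.NewWorkloadX509CertProvider.

package main

import (
	"crypto/tls"
	"net/http"
	"time"
)

func main() {
	// Placeholder key pair standing in for the workload cert provider.
	cert, err := tls.LoadX509KeyPair("/path/to/client.crt", "/path/to/client.key")
	if err != nil {
		panic(err)
	}
	trans := http.DefaultTransport.(*http.Transport).Clone()
	trans.TLSClientConfig = &tls.Config{
		GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
			return &cert, nil
		},
	}
	client := &http.Client{Transport: trans, Timeout: 30 * time.Second}
	_ = client // requests made with this client present the cert during TLS
}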
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
new file mode 100644
index 000000000..ae39206e5
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
@@ -0,0 +1,115 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccountuser
+
+import (
+ "context"
+ "errors"
+ "log/slog"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials/internal/stsexchange"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+// Options stores the configuration for fetching tokens with external authorized
+// user credentials.
+type Options struct {
+ // Audience is the Secure Token Service (STS) audience which contains the
+ // resource name for the workforce pool and the provider identifier in that
+ // pool.
+ Audience string
+ // RefreshToken is the OAuth 2.0 refresh token.
+ RefreshToken string
+ // TokenURL is the STS token exchange endpoint for refresh.
+ TokenURL string
+ // TokenInfoURL is the STS endpoint URL for token introspection. Optional.
+ TokenInfoURL string
+ // ClientID is only required in conjunction with ClientSecret, as described
+ // below.
+ ClientID string
+ // ClientSecret is currently only required if token_info endpoint also needs
+ // to be called with the generated cloud access token. When provided, STS
+ // will be called with additional basic authentication using client_id as
+ // username and client_secret as password.
+ ClientSecret string
+ // Scopes contains the desired scopes for the returned access token.
+ Scopes []string
+
+ // Client for token request.
+ Client *http.Client
+ // Logger for logging.
+ Logger *slog.Logger
+}
+
+func (c *Options) validate() bool {
+ return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
+// configured with the provided options.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+ if !opts.validate() {
+ return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
+ }
+
+ tp := &tokenProvider{
+ o: opts,
+ }
+ return auth.NewCachedTokenProvider(tp, nil), nil
+}
+
+type tokenProvider struct {
+ o *Options
+}
+
+func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+ opts := tp.o
+
+ clientAuth := stsexchange.ClientAuthentication{
+ AuthStyle: auth.StyleInHeader,
+ ClientID: opts.ClientID,
+ ClientSecret: opts.ClientSecret,
+ }
+ headers := make(http.Header)
+ headers.Set("Content-Type", "application/x-www-form-urlencoded")
+ stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
+ Client: opts.Client,
+ Endpoint: opts.TokenURL,
+ RefreshToken: opts.RefreshToken,
+ Authentication: clientAuth,
+ Headers: headers,
+ Logger: internallog.New(tp.o.Logger),
+ })
+ if err != nil {
+ return nil, err
+ }
+ if stsResponse.ExpiresIn < 0 {
+ return nil, errors.New("credentials: invalid expiry from security token service")
+ }
+
+ // guarded by the wrapping with CachedTokenProvider
+ if stsResponse.RefreshToken != "" {
+ opts.RefreshToken = stsResponse.RefreshToken
+ }
+ return &auth.Token{
+ Value: stsResponse.AccessToken,
+ Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
+ Type: internal.TokenTypeBearer,
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
new file mode 100644
index 000000000..c2d320fdf
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
@@ -0,0 +1,191 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gdch
+
+import (
+ "context"
+ "crypto"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // GrantType is the grant type for the token request.
+ GrantType = "urn:ietf:params:oauth:token-type:token-exchange"
+ requestTokenType = "urn:ietf:params:oauth:token-type:access_token"
+ subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount"
+)
+
+var (
+ gdchSupportFormatVersions map[string]bool = map[string]bool{
+ "1": true,
+ }
+)
+
+// Options for [NewTokenProvider].
+type Options struct {
+ STSAudience string
+ Client *http.Client
+ Logger *slog.Logger
+}
+
+// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
+// GDCH cred file.
+func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) {
+ if !gdchSupportFormatVersions[f.FormatVersion] {
+ return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion)
+ }
+ if o.STSAudience == "" {
+ return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
+ }
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
+ if err != nil {
+ return nil, err
+ }
+ certPool, err := loadCertPool(f.CertPath)
+ if err != nil {
+ return nil, err
+ }
+
+ tp := gdchProvider{
+ serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
+ tokenURL: f.TokenURL,
+ aud: o.STSAudience,
+ signer: signer,
+ pkID: f.PrivateKeyID,
+ certPool: certPool,
+ client: o.Client,
+ logger: internallog.New(o.Logger),
+ }
+ return tp, nil
+}
+
+func loadCertPool(path string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+ pem, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to read certificate: %w", err)
+ }
+ pool.AppendCertsFromPEM(pem)
+ return pool, nil
+}
+
+type gdchProvider struct {
+ serviceIdentity string
+ tokenURL string
+ aud string
+ signer crypto.Signer
+ pkID string
+ certPool *x509.CertPool
+
+ client *http.Client
+ logger *slog.Logger
+}
+
+func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
+ addCertToTransport(g.client, g.certPool)
+ iat := time.Now()
+ exp := iat.Add(time.Hour)
+ claims := jwt.Claims{
+ Iss: g.serviceIdentity,
+ Sub: g.serviceIdentity,
+ Aud: g.tokenURL,
+ Iat: iat.Unix(),
+ Exp: exp.Unix(),
+ }
+ h := jwt.Header{
+ Algorithm: jwt.HeaderAlgRSA256,
+ Type: jwt.HeaderType,
+ KeyID: g.pkID,
+ }
+ payload, err := jwt.EncodeJWS(&h, &claims, g.signer)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", GrantType)
+ v.Set("audience", g.aud)
+ v.Set("requested_token_type", requestTokenType)
+ v.Set("subject_token", payload)
+ v.Set("subject_token_type", subjectTokenType)
+
+ req, err := http.NewRequestWithContext(ctx, "POST", g.tokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+ resp, body, err := internal.DoRequest(g.client, req)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+ }
+ g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
+ return nil, &auth.Error{
+ Response: resp,
+ Body: body,
+ }
+ }
+
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
+ }
+ token := &auth.Token{
+ Value: tokenRes.AccessToken,
+ Type: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token.Metadata = raw
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ return token, nil
+}
+
+// addCertToTransport makes a best-effort attempt at adding the cert info to
+// the client. It keeps all configured transport settings if the underlying
+// transport is an http.Transport; otherwise it installs a clone of the
+// default transport with the certs added.
+func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
+ trans, ok := hc.Transport.(*http.Transport)
+ if !ok {
+ trans = http.DefaultTransport.(*http.Transport).Clone()
+ // Install the clone on the client; otherwise the TLS config set
+ // below would apply to a transport that is never used.
+ hc.Transport = trans
+ }
+ trans.TLSClientConfig = &tls.Config{
+ RootCAs: certPool,
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
new file mode 100644
index 000000000..705462c16
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
@@ -0,0 +1,105 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+var (
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN"
+)
+
+// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token].
+type IDTokenIAMOptions struct {
+ // Client is required.
+ Client *http.Client
+ // Logger is required.
+ Logger *slog.Logger
+ UniverseDomain auth.CredentialsPropertyProvider
+ ServiceAccountEmail string
+ GenerateIDTokenRequest
+}
+
+// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC.
+type GenerateIDTokenRequest struct {
+ Audience string `json:"audience"`
+ IncludeEmail bool `json:"includeEmail"`
+ // Delegates are the ordered, fully-qualified resource name for service
+ // accounts in a delegation chain. Each service account must be granted
+ // roles/iam.serviceAccountTokenCreator on the next service account in the
+ // chain. The delegates must have the following format:
+ // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard
+ // character is required; replacing it with a project ID is invalid.
+ // Optional.
+ Delegates []string `json:"delegates,omitempty"`
+}
+
+// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC.
+type GenerateIDTokenResponse struct {
+ Token string `json:"token"`
+}
+
+// Token calls the IAM generateIdToken RPC with the configuration provided in [IDTokenIAMOptions].
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) {
+ universeDomain, err := o.UniverseDomain.GetProperty(ctx)
+ if err != nil {
+ return nil, err
+ }
+ endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1)
+ url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail))
+
+ bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes))
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes))
+ resp, body, err := internal.DoRequest(o.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err)
+ }
+ o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
+ }
+
+ var tokenResp GenerateIDTokenResponse
+ if err := json.Unmarshal(body, &tokenResp); err != nil {
+ return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
+ }
+ return &auth.Token{
+ Value: tokenResp.Token,
+ // Generated ID tokens are good for one hour.
+ Expiry: time.Now().Add(1 * time.Hour),
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
new file mode 100644
index 000000000..b3a99261f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -0,0 +1,156 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ defaultTokenLifetime = "3600s"
+ authHeaderKey = "Authorization"
+)
+
+// generateAccessTokenReq is the request body used for service account impersonation.
+type generateAccessTokenReq struct {
+ Delegates []string `json:"delegates,omitempty"`
+ Lifetime string `json:"lifetime,omitempty"`
+ Scope []string `json:"scope,omitempty"`
+}
+
+type impersonateTokenResponse struct {
+ AccessToken string `json:"accessToken"`
+ ExpireTime string `json:"expireTime"`
+}
+
+// NewTokenProvider uses the source credential stored in Options.Tp to request
+// an access token from the provided URL. Scopes can be defined when the
+// access token is requested.
+func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ return opts, nil
+}
+
+// Options for [NewTokenProvider].
+type Options struct {
+ // Tp is the source credential used to generate a token on the
+ // impersonated service account. Required.
+ Tp auth.TokenProvider
+
+ // URL is the endpoint to call to generate a token
+ // on behalf of the service account. Required.
+ URL string
+ // Scopes that the impersonated credential should have. Required.
+ Scopes []string
+ // Delegates are the service account email addresses in a delegation chain.
+ // Each service account must be granted roles/iam.serviceAccountTokenCreator
+ // on the next service account in the chain. Optional.
+ Delegates []string
+ // TokenLifetimeSeconds is the number of seconds the impersonation token will
+ // be valid for. Defaults to 1 hour if unset. Optional.
+ TokenLifetimeSeconds int
+ // Client configures the underlying client used to make network requests
+ // when fetching tokens. Required.
+ Client *http.Client
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
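+
+// An illustrative sketch of how these options fit together (srcTP stands in
+// for any source auth.TokenProvider; the URL is a placeholder):
+//
+//	tp, err := NewTokenProvider(&Options{
+//		Tp:     srcTP,
+//		URL:    "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/sa@example.iam.gserviceaccount.com:generateAccessToken",
+//		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//		Client: internal.DefaultClient(),
+//	})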
+
+func (o *Options) validate() error {
+ if o.Tp == nil {
+ return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials")
+ }
+ if o.URL == "" {
+ return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials")
+ }
+ return nil
+}
+
+// Token performs the exchange to get a temporary service account token to allow access to GCP.
+func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
+ logger := internallog.New(o.Logger)
+ lifetime := defaultTokenLifetime
+ if o.TokenLifetimeSeconds != 0 {
+ lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
+ }
+ reqBody := generateAccessTokenReq{
+ Lifetime: lifetime,
+ Scope: o.Scopes,
+ Delegates: o.Delegates,
+ }
+ b, err := json.Marshal(reqBody)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to marshal request: %w", err)
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ if err := setAuthHeader(ctx, o.Tp, req); err != nil {
+ return nil, err
+ }
+ logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
+ resp, body, err := internal.DoRequest(o.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
+ }
+ logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
+ return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
+ }
+
+ var accessTokenResp impersonateTokenResponse
+ if err := json.Unmarshal(body, &accessTokenResp); err != nil {
+ return nil, fmt.Errorf("credentials: unable to parse response: %w", err)
+ }
+ expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err)
+ }
+ return &auth.Token{
+ Value: accessTokenResp.AccessToken,
+ Expiry: expiry,
+ Type: internal.TokenTypeBearer,
+ }, nil
+}
+
+func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error {
+ t, err := tp.Token(ctx)
+ if err != nil {
+ return err
+ }
+ typ := t.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ r.Header.Set(authHeaderKey, typ+" "+t.Value)
+ return nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
new file mode 100644
index 000000000..e1d2b1503
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
@@ -0,0 +1,167 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stsexchange
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+const (
+ // GrantType for an STS exchange.
+ GrantType = "urn:ietf:params:oauth:grant-type:token-exchange"
+ // TokenType for an STS exchange.
+ TokenType = "urn:ietf:params:oauth:token-type:access_token"
+
+ jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
+)
+
+// Options stores the configuration for making an STS exchange request.
+type Options struct {
+ Client *http.Client
+ Logger *slog.Logger
+ Endpoint string
+ Request *TokenRequest
+ Authentication ClientAuthentication
+ Headers http.Header
+ // ExtraOpts are optional fields marshalled into the `options` field of the
+ // request body.
+ ExtraOpts map[string]interface{}
+ RefreshToken string
+}
+
+// RefreshAccessToken performs the token exchange using a refresh token flow.
+func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
+ data := url.Values{}
+ data.Set("grant_type", "refresh_token")
+ data.Set("refresh_token", opts.RefreshToken)
+ return doRequest(ctx, opts, data)
+}
+
+// ExchangeToken performs an oauth2 token exchange with the provided endpoint.
+func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
+ data := url.Values{}
+ data.Set("audience", opts.Request.Audience)
+ data.Set("grant_type", GrantType)
+ data.Set("requested_token_type", TokenType)
+ data.Set("subject_token_type", opts.Request.SubjectTokenType)
+ data.Set("subject_token", opts.Request.SubjectToken)
+ data.Set("scope", strings.Join(opts.Request.Scope, " "))
+ if opts.ExtraOpts != nil {
+ extraOpts, err := json.Marshal(opts.ExtraOpts)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err)
+ }
+ data.Set("options", string(extraOpts))
+ }
+ return doRequest(ctx, opts, data)
+}
+
+func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
+ opts.Authentication.InjectAuthentication(data, opts.Headers)
+ encodedData := data.Encode()
+ logger := internallog.New(opts.Logger)
+
+ req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err)
+ }
+ for key, list := range opts.Headers {
+ for _, val := range list {
+ req.Header.Add(key, val)
+ }
+ }
+ req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
+
+ logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData)))
+ resp, body, err := internal.DoRequest(opts.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
+ }
+ logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
+ return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
+ }
+ var stsResp TokenResponse
+ if err := json.Unmarshal(body, &stsResp); err != nil {
+ return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err)
+ }
+
+ return &stsResp, nil
+}
+
+// TokenRequest contains fields necessary to make an oauth2 token
+// exchange.
+type TokenRequest struct {
+ ActingParty struct {
+ ActorToken string
+ ActorTokenType string
+ }
+ GrantType string
+ Resource string
+ Audience string
+ Scope []string
+ RequestedTokenType string
+ SubjectToken string
+ SubjectTokenType string
+}
+
+// TokenResponse is used to decode the remote server response during
+// an oauth2 token exchange.
+type TokenResponse struct {
+ AccessToken string `json:"access_token"`
+ IssuedTokenType string `json:"issued_token_type"`
+ TokenType string `json:"token_type"`
+ ExpiresIn int `json:"expires_in"`
+ Scope string `json:"scope"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+// ClientAuthentication represents an OAuth client ID and secret and the
+// mechanism for passing these credentials as stated in rfc6749#2.3.1.
+type ClientAuthentication struct {
+ AuthStyle auth.Style
+ ClientID string
+ ClientSecret string
+}
+
+// InjectAuthentication is used to add authentication to a Secure Token Service
+// exchange request. It modifies either the passed url.Values or http.Header
+// depending on the desired authentication format.
+func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) {
+ if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil {
+ return
+ }
+ switch c.AuthStyle {
+ case auth.StyleInHeader:
+ plainHeader := c.ClientID + ":" + c.ClientSecret
+ headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader)))
+ default:
+ values.Set("client_id", c.ClientID)
+ values.Set("client_secret", c.ClientSecret)
+ }
+}
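+
+// For reference, with AuthStyle == auth.StyleInHeader the request carries
+//
+//	Authorization: Basic base64(clientID + ":" + clientSecret)
+//
+// while the default style instead adds client_id and client_secret fields to
+// the url-encoded form body.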
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
new file mode 100644
index 000000000..8d335ccec
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -0,0 +1,89 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credentials
+
+import (
+ "context"
+ "crypto"
+ "errors"
+ "fmt"
+ "log/slog"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/credsfile"
+ "cloud.google.com/go/auth/internal/jwt"
+)
+
+var (
+ // for testing
+ now func() time.Time = time.Now
+)
+
+// configureSelfSignedJWT uses the private key in the service account to create
+// a JWT without making a network call.
+func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ if len(opts.scopes()) == 0 && opts.Audience == "" {
+ return nil, errors.New("credentials: both scopes and audience are empty")
+ }
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
+ if err != nil {
+ return nil, fmt.Errorf("credentials: could not parse key: %w", err)
+ }
+ return &selfSignedTokenProvider{
+ email: f.ClientEmail,
+ audience: opts.Audience,
+ scopes: opts.scopes(),
+ signer: signer,
+ pkID: f.PrivateKeyID,
+ logger: opts.logger(),
+ }, nil
+}
+
+type selfSignedTokenProvider struct {
+ email string
+ audience string
+ scopes []string
+ signer crypto.Signer
+ pkID string
+ logger *slog.Logger
+}
+
+func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
+ iat := now()
+ exp := iat.Add(time.Hour)
+ scope := strings.Join(tp.scopes, " ")
+ c := &jwt.Claims{
+ Iss: tp.email,
+ Sub: tp.email,
+ Aud: tp.audience,
+ Scope: scope,
+ Iat: iat.Unix(),
+ Exp: exp.Unix(),
+ }
+ h := &jwt.Header{
+ Algorithm: jwt.HeaderAlgRSA256,
+ Type: jwt.HeaderType,
+ KeyID: tp.pkID,
+ }
+ tok, err := jwt.EncodeJWS(h, c, tp.signer)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
+ }
+ tp.logger.Debug("created self-signed JWT", "token", tok)
+ return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
new file mode 100644
index 000000000..5758e85b5
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -0,0 +1,247 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httptransport provides functionality for managing HTTP client
+// connections to Google Cloud services.
+package httptransport
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net/http"
+
+ "cloud.google.com/go/auth"
+ detect "cloud.google.com/go/auth/credentials"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+// ClientCertProvider is a function that returns a TLS client certificate to be
+// used when opening TLS connections. It follows the same semantics as
+// [crypto/tls.Config.GetClientCertificate].
+type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// Options used to configure a [net/http.Client] from [NewClient].
+type Options struct {
+ // DisableTelemetry disables default telemetry (OpenTelemetry). An example
+ // reason to do so would be to bind custom telemetry that overrides the
+ // defaults.
+ DisableTelemetry bool
+ // DisableAuthentication specifies that no authentication should be used. It
+ // is suitable only for testing and for accessing public resources, like
+ // public Google Cloud Storage buckets.
+ DisableAuthentication bool
+ // Headers are extra HTTP headers that will be appended to every outgoing
+ // request.
+ Headers http.Header
+ // BaseRoundTripper overrides the base transport used for serving requests.
+ // If specified, ClientCertProvider is ignored.
+ BaseRoundTripper http.RoundTripper
+ // Endpoint overrides the default endpoint to be used for a service.
+ Endpoint string
+ // APIKey specifies an API key to be used as the basis for authentication.
+ // If set, DetectOpts are ignored.
+ APIKey string
+ // Credentials used to add an Authorization header to all requests. If set,
+ // DetectOpts are ignored.
+ Credentials *auth.Credentials
+ // ClientCertProvider is a function that returns a TLS client certificate to
+ // be used when opening TLS connections. It follows the same semantics as
+ // crypto/tls.Config.GetClientCertificate.
+ ClientCertProvider ClientCertProvider
+ // DetectOpts configures settings for detecting Application Default
+ // Credentials.
+ DetectOpts *detect.DetectOptions
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // The default value is "googleapis.com". This is the universe domain
+ // configured for the client, which will be compared to the universe domain
+ // that is separately configured for the credentials.
+ UniverseDomain string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+
+ // InternalOptions are NOT meant to be set directly by consumers of this
+ // package, they should only be set by generated client code.
+ InternalOptions *InternalOptions
+}
+
+func (o *Options) validate() error {
+ if o == nil {
+ return errors.New("httptransport: opts required to be non-nil")
+ }
+ if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
+ return nil
+ }
+ hasCreds := o.APIKey != "" ||
+ o.Credentials != nil ||
+ (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
+ (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
+ if o.DisableAuthentication && hasCreds {
+ return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
+ }
+ return nil
+}
+
+// client returns the client the user set in the detect options, or nil if
+// none was set.
+func (o *Options) client() *http.Client {
+ if o.DetectOpts != nil && o.DetectOpts.Client != nil {
+ return o.DetectOpts.Client
+ }
+ return nil
+}
+
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+func (o *Options) resolveDetectOptions() *detect.DetectOptions {
+ io := o.InternalOptions
+ // soft-clone these so we are not updating a ref the user holds and may reuse
+ do := transport.CloneDetectOptions(o.DetectOpts)
+
+ // If scoped JWTs are enabled or the user provided an aud, allow self-signed JWT.
+ if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
+ do.UseSelfSignedJWT = true
+ }
+ // Only default scopes if user did not also set an audience.
+ if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
+ do.Scopes = make([]string, len(io.DefaultScopes))
+ copy(do.Scopes, io.DefaultScopes)
+ }
+ if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
+ do.Audience = o.InternalOptions.DefaultAudience
+ }
+ if o.ClientCertProvider != nil {
+ tlsConfig := &tls.Config{
+ GetClientCertificate: o.ClientCertProvider,
+ }
+ do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
+ do.TokenURL = detect.GoogleMTLSTokenURL
+ }
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
+ return do
+}
+
+// InternalOptions are only meant to be set by generated client code. These are
+// not meant to be set directly by consumers of this package. Configuration in
+// this type is considered EXPERIMENTAL and may be removed at any time in the
+// future without warning.
+type InternalOptions struct {
+ // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
+ EnableJWTWithScope bool
+ // DefaultAudience specifies a default audience to be used as the audience
+ // field ("aud") for the JWT token authentication.
+ DefaultAudience string
+ // DefaultEndpointTemplate combined with UniverseDomain specifies the
+ // default endpoint.
+ DefaultEndpointTemplate string
+ // DefaultMTLSEndpoint specifies the default mTLS endpoint.
+ DefaultMTLSEndpoint string
+ // DefaultScopes specifies the default OAuth2 scopes to be used for a
+ // service.
+ DefaultScopes []string
+ // SkipValidation bypasses validation on Options. It should only be used
+ // internally for clients that need more control over their transport.
+ SkipValidation bool
+ // SkipUniverseDomainValidation skips the verification that the universe
+ // domain configured for the client matches the universe domain configured
+ // for the credentials. It should only be used internally for clients that
+ // need more control over their transport. The default is false.
+ SkipUniverseDomainValidation bool
+}
+
+// AddAuthorizationMiddleware adds a middleware to the provided client's
+// transport that sets the Authorization header with the value produced by the
+// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
+// if client or creds is nil.
+//
+// This function does not support setting a universe domain value on the client.
+func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
+ if client == nil || creds == nil {
+ return fmt.Errorf("httptransport: client and tp must not be nil")
+ }
+ base := client.Transport
+ if base == nil {
+ if dt, ok := http.DefaultTransport.(*http.Transport); ok {
+ base = dt.Clone()
+ } else {
+ // Directly reuse the DefaultTransport if the application has
+ // replaced it with an implementation of RoundTripper other than
+ // http.Transport.
+ base = http.DefaultTransport
+ }
+ }
+ client.Transport = &authTransport{
+ creds: creds,
+ base: base,
+ }
+ return nil
+}
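+
+// An illustrative sketch of the middleware in use (creds obtained elsewhere,
+// e.g. from credentials.DetectDefault; the URL is a placeholder):
+//
+//	client := &http.Client{}
+//	if err := AddAuthorizationMiddleware(client, creds); err != nil {
+//		// handle error
+//	}
+//	resp, err := client.Get("https://storage.googleapis.com/storage/v1/b?project=my-project")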
+
+// NewClient returns a [net/http.Client] that can be used to communicate with a
+// Google cloud service, configured with the provided [Options]. It
+// automatically appends Authorization headers to all outgoing requests.
+func NewClient(opts *Options) (*http.Client, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+
+ tOpts := &transport.Options{
+ Endpoint: opts.Endpoint,
+ ClientCertProvider: opts.ClientCertProvider,
+ Client: opts.client(),
+ UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
+ }
+ if io := opts.InternalOptions; io != nil {
+ tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
+ tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
+ }
+ clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
+ if err != nil {
+ return nil, err
+ }
+ baseRoundTripper := opts.BaseRoundTripper
+ if baseRoundTripper == nil {
+ baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
+ }
+ // Ensure the token exchange transport uses the same ClientCertProvider as the API transport.
+ opts.ClientCertProvider = clientCertProvider
+ trans, err := newTransport(baseRoundTripper, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &http.Client{
+ Transport: trans,
+ }, nil
+}
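+
+// An illustrative sketch of constructing an authenticated client via ADC
+// detection (the scope is a placeholder):
+//
+//	client, err := NewClient(&Options{
+//		DetectOpts: &detect.DetectOptions{
+//			Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
+//		},
+//	})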
+
+// SetAuthHeader uses the provided token to set the Authorization header on a
+// request. If the token.Type is empty, the type is assumed to be Bearer.
+func SetAuthHeader(token *auth.Token, req *http.Request) {
+ typ := token.Type
+ if typ == "" {
+ typ = internal.TokenTypeBearer
+ }
+ req.Header.Set("Authorization", typ+" "+token.Value)
+}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
new file mode 100644
index 000000000..ee215b6dc
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -0,0 +1,234 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptransport
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+ "os"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/credentials"
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport"
+ "cloud.google.com/go/auth/internal/transport/cert"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+ "golang.org/x/net/http2"
+)
+
+const (
+ quotaProjectHeaderKey = "X-goog-user-project"
+)
+
+func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
+ var headers = opts.Headers
+ ht := &headerTransport{
+ base: base,
+ headers: headers,
+ }
+ var trans http.RoundTripper = ht
+ trans = addOpenTelemetryTransport(trans, opts)
+ switch {
+ case opts.DisableAuthentication:
+ // Do nothing.
+ case opts.APIKey != "":
+ qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
+ if qp != "" {
+ if headers == nil {
+ headers = make(map[string][]string, 1)
+ }
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
+ trans = &apiKeyTransport{
+ Transport: trans,
+ Key: opts.APIKey,
+ }
+ default:
+ var creds *auth.Credentials
+ if opts.Credentials != nil {
+ creds = opts.Credentials
+ } else {
+ var err error
+ creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+ if err != nil {
+ return nil, err
+ }
+ }
+ qp, err := creds.QuotaProjectID(context.Background())
+ if err != nil {
+ return nil, err
+ }
+ if qp != "" {
+ if headers == nil {
+ headers = make(map[string][]string, 1)
+ }
+ // Don't overwrite a user-specified quota project.
+ if v := headers.Get(quotaProjectHeaderKey); v == "" {
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
+ }
+ var skipUD bool
+ if iOpts := opts.InternalOptions; iOpts != nil {
+ skipUD = iOpts.SkipUniverseDomainValidation
+ }
+ creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
+ trans = &authTransport{
+ base: trans,
+ creds: creds,
+ clientUniverseDomain: opts.UniverseDomain,
+ skipUniverseDomainValidation: skipUD,
+ }
+ }
+ return trans, nil
+}
+
+// defaultBaseTransport returns the base HTTP transport, taking most defaults
+// from http.DefaultTransport (cloned when possible).
+// If a client certificate source is available, TLSClientConfig is set as well.
+func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
+ defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+ if !ok {
+ defaultTransport = transport.BaseTransport()
+ }
+ trans := defaultTransport.Clone()
+ trans.MaxIdleConnsPerHost = 100
+
+ if clientCertSource != nil {
+ trans.TLSClientConfig = &tls.Config{
+ GetClientCertificate: clientCertSource,
+ }
+ }
+ if dialTLSContext != nil {
+ // If DialTLSContext is set, TLSClientConfig will be ignored.
+ trans.DialTLSContext = dialTLSContext
+ }
+
+ // Configures the ReadIdleTimeout HTTP/2 option for the
+ // transport. This allows broken idle connections to be pruned more quickly,
+ // preventing the client from attempting to re-use connections that will no
+ // longer work.
+ http2Trans, err := http2.ConfigureTransports(trans)
+ if err == nil {
+ http2Trans.ReadIdleTimeout = time.Second * 31
+ }
+
+ return trans
+}
+
+type apiKeyTransport struct {
+ // Key is the API Key to set on requests.
+ Key string
+ // Transport is the underlying HTTP transport.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+}
+
+func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ newReq := *req
+ args := newReq.URL.Query()
+ args.Set("key", t.Key)
+ newReq.URL.RawQuery = args.Encode()
+ return t.Transport.RoundTrip(&newReq)
+}
+
+type headerTransport struct {
+ headers http.Header
+ base http.RoundTripper
+}
+
+func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ rt := t.base
+ newReq := *req
+ newReq.Header = make(http.Header)
+ for k, vv := range req.Header {
+ newReq.Header[k] = vv
+ }
+
+ for k, v := range t.headers {
+ newReq.Header[k] = v
+ }
+
+ return rt.RoundTrip(&newReq)
+}
+
+func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+ if opts.DisableTelemetry {
+ return trans
+ }
+ return otelhttp.NewTransport(trans)
+}
+
+type authTransport struct {
+ creds *auth.Credentials
+ base http.RoundTripper
+ clientUniverseDomain string
+ skipUniverseDomainValidation bool
+}
+
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
+func (t *authTransport) getClientUniverseDomain() string {
+ if t.clientUniverseDomain != "" {
+ return t.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
+ }
+ return internal.DefaultUniverseDomain
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source. Per the RoundTripper contract we must
+// not modify the initial request, so we clone it, and we must close the body
+// on any error that happens during our token logic.
+func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+ token, err := t.creds.Token(req.Context())
+ if err != nil {
+ return nil, err
+ }
+ if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
+ if err != nil {
+ return nil, err
+ }
+ if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+ return nil, err
+ }
+ }
+ req2 := req.Clone(req.Context())
+ SetAuthHeader(token, req2)
+ reqBodyClosed = true
+ return t.base.RoundTrip(req2)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
new file mode 100644
index 000000000..9cd4bed61
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credsfile is meant to hide implementation details from the public
+// surface of the detect package. It should not import any other packages in
+// this module. It is located under the main internal package so other
+// sub-packages can use these parsed types as well.
+package credsfile
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+)
+
+const (
+ // GoogleAppCredsEnvVar is the environment variable for setting the
+ // application default credentials.
+ GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
+ userCredsFilename = "application_default_credentials.json"
+)
+
+// CredentialType represents different credential filetypes Google credentials
+// can be.
+type CredentialType int
+
+const (
+ // UnknownCredType is an unidentified file type.
+ UnknownCredType CredentialType = iota
+ // UserCredentialsKey represents a user creds file type.
+ UserCredentialsKey
+ // ServiceAccountKey represents a service account file type.
+ ServiceAccountKey
+ // ImpersonatedServiceAccountKey represents an impersonated service account
+ // file type.
+ ImpersonatedServiceAccountKey
+ // ExternalAccountKey represents an external account file type.
+ ExternalAccountKey
+ // GDCHServiceAccountKey represents a GDCH file type.
+ GDCHServiceAccountKey
+ // ExternalAccountAuthorizedUserKey represents an external account authorized
+ // user file type.
+ ExternalAccountAuthorizedUserKey
+)
+
+// parseCredentialType returns the associated filetype based on the parsed
+// typeString provided.
+func parseCredentialType(typeString string) CredentialType {
+ switch typeString {
+ case "service_account":
+ return ServiceAccountKey
+ case "authorized_user":
+ return UserCredentialsKey
+ case "impersonated_service_account":
+ return ImpersonatedServiceAccountKey
+ case "external_account":
+ return ExternalAccountKey
+ case "external_account_authorized_user":
+ return ExternalAccountAuthorizedUserKey
+ case "gdch_service_account":
+ return GDCHServiceAccountKey
+ default:
+ return UnknownCredType
+ }
+}
+
+// GetFileNameFromEnv returns the override if provided or detects a filename
+// from the environment.
+func GetFileNameFromEnv(override string) string {
+ if override != "" {
+ return override
+ }
+ return os.Getenv(GoogleAppCredsEnvVar)
+}
+
+// GetWellKnownFileName tries to locate the filepath for the user credential
+// file based on the environment.
+func GetWellKnownFileName() string {
+ if runtime.GOOS == "windows" {
+ return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
+ }
+ return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
+}
+
+// guessUnixHomeDir defaults to checking HOME, but not all Unix systems set
+// it, so fall back to the current user's home directory.
+func guessUnixHomeDir() string {
+ if v := os.Getenv("HOME"); v != "" {
+ return v
+ }
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
+}
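+
+// For reference, the well-known file typically resolves to:
+//
+//	Linux/macOS: $HOME/.config/gcloud/application_default_credentials.json
+//	Windows:     %APPDATA%\gcloud\application_default_credentials.json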
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
new file mode 100644
index 000000000..3be6e5bbb
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
@@ -0,0 +1,157 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+ "encoding/json"
+)
+
+// Config3LO is the internals of a client creds file.
+type Config3LO struct {
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RedirectURIs []string `json:"redirect_uris"`
+ AuthURI string `json:"auth_uri"`
+ TokenURI string `json:"token_uri"`
+}
+
+// ClientCredentialsFile representation.
+type ClientCredentialsFile struct {
+ Web *Config3LO `json:"web"`
+ Installed *Config3LO `json:"installed"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// ServiceAccountFile representation.
+type ServiceAccountFile struct {
+ Type string `json:"type"`
+ ProjectID string `json:"project_id"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ ClientEmail string `json:"client_email"`
+ ClientID string `json:"client_id"`
+ AuthURL string `json:"auth_uri"`
+ TokenURL string `json:"token_uri"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// UserCredentialsFile representation.
+type UserCredentialsFile struct {
+ Type string `json:"type"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ QuotaProjectID string `json:"quota_project_id"`
+ RefreshToken string `json:"refresh_token"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// ExternalAccountFile representation.
+type ExternalAccountFile struct {
+ Type string `json:"type"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ Audience string `json:"audience"`
+ SubjectTokenType string `json:"subject_token_type"`
+ ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
+ TokenURL string `json:"token_url"`
+ CredentialSource *CredentialSource `json:"credential_source,omitempty"`
+ TokenInfoURL string `json:"token_info_url"`
+ ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"`
+ QuotaProjectID string `json:"quota_project_id"`
+ WorkforcePoolUserProject string `json:"workforce_pool_user_project"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// ExternalAccountAuthorizedUserFile representation.
+type ExternalAccountAuthorizedUserFile struct {
+ Type string `json:"type"`
+ Audience string `json:"audience"`
+ ClientID string `json:"client_id"`
+ ClientSecret string `json:"client_secret"`
+ RefreshToken string `json:"refresh_token"`
+ TokenURL string `json:"token_url"`
+ TokenInfoURL string `json:"token_info_url"`
+ RevokeURL string `json:"revoke_url"`
+ QuotaProjectID string `json:"quota_project_id"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
+//
+// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question.
+// The EnvironmentID should start with AWS if being used for an AWS credential.
+type CredentialSource struct {
+ File string `json:"file"`
+ URL string `json:"url"`
+ Headers map[string]string `json:"headers"`
+ Executable *ExecutableConfig `json:"executable,omitempty"`
+ Certificate *CertificateConfig `json:"certificate"`
+ EnvironmentID string `json:"environment_id"` // TODO: Make type for this
+ RegionURL string `json:"region_url"`
+ RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
+ CredVerificationURL string `json:"cred_verification_url"`
+ IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
+ Format *Format `json:"format,omitempty"`
+}
+
+// Format describes the format of a [CredentialSource].
+type Format struct {
+ // Type is either "text" or "json". When not provided "text" type is assumed.
+ Type string `json:"type"`
+ // SubjectTokenFieldName is only required for the JSON format. This would be "access_token" for Azure.
+ SubjectTokenFieldName string `json:"subject_token_field_name"`
+}
+
+// ExecutableConfig represents the command to run for an executable
+// [CredentialSource].
+type ExecutableConfig struct {
+ Command string `json:"command"`
+ TimeoutMillis int `json:"timeout_millis"`
+ OutputFile string `json:"output_file"`
+}
+
+// CertificateConfig represents the options used to set up an X509-based
+// workload [CredentialSource].
+type CertificateConfig struct {
+ UseDefaultCertificateConfig bool `json:"use_default_certificate_config"`
+ CertificateConfigLocation string `json:"certificate_config_location"`
+}
+
+// ServiceAccountImpersonationInfo has impersonation configuration.
+type ServiceAccountImpersonationInfo struct {
+ TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
+}
+
+// ImpersonatedServiceAccountFile representation.
+type ImpersonatedServiceAccountFile struct {
+ Type string `json:"type"`
+ ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
+ Delegates []string `json:"delegates"`
+ CredSource json.RawMessage `json:"source_credentials"`
+ UniverseDomain string `json:"universe_domain"`
+}
+
+// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
+type GDCHServiceAccountFile struct {
+ Type string `json:"type"`
+ FormatVersion string `json:"format_version"`
+ Project string `json:"project"`
+ Name string `json:"name"`
+ CertPath string `json:"ca_cert_path"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ TokenURL string `json:"token_uri"`
+ UniverseDomain string `json:"universe_domain"`
+}
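+
+// An illustrative (abridged) gdch_service_account payload matching the
+// struct above; all values are placeholders:
+//
+//	{
+//	  "type": "gdch_service_account",
+//	  "format_version": "1",
+//	  "project": "my-project",
+//	  "name": "my-identity",
+//	  "ca_cert_path": "/path/to/ca.pem",
+//	  "private_key_id": "my-key-id",
+//	  "private_key": "-----BEGIN PRIVATE KEY-----\n...",
+//	  "token_uri": "https://service-identity.example.com/authenticate"
+//	}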
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
new file mode 100644
index 000000000..a02b9f5df
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
@@ -0,0 +1,98 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package credsfile
+
+import (
+ "encoding/json"
+)
+
+// ParseServiceAccount parses bytes into a [ServiceAccountFile].
+func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
+ var f *ServiceAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseClientCredentials parses bytes into a
+// [credsfile.ClientCredentialsFile].
+func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
+ var f *ClientCredentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseUserCredentials parses bytes into a [UserCredentialsFile].
+func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
+ var f *UserCredentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseExternalAccount parses bytes into a [ExternalAccountFile].
+func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
+ var f *ExternalAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseExternalAccountAuthorizedUser parses bytes into a
+// [ExternalAccountAuthorizedUserFile].
+func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
+ var f *ExternalAccountAuthorizedUserFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseImpersonatedServiceAccount parses bytes into a
+// [ImpersonatedServiceAccountFile].
+func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
+ var f *ImpersonatedServiceAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
+func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
+ var f *GDCHServiceAccountFile
+ if err := json.Unmarshal(b, &f); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+type fileTypeChecker struct {
+ Type string `json:"type"`
+}
+
+// ParseFileType determines the [CredentialType] based on bytes provided.
+func ParseFileType(b []byte) (CredentialType, error) {
+ var f fileTypeChecker
+ if err := json.Unmarshal(b, &f); err != nil {
+ return 0, err
+ }
+ return parseCredentialType(f.Type), nil
+}
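+
+// An illustrative sketch of dispatching on the detected type (b holds raw
+// credentials JSON):
+//
+//	t, err := ParseFileType(b)
+//	if err == nil && t == ServiceAccountKey {
+//		f, err := ParseServiceAccount(b)
+//		// use f ...
+//	}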
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
new file mode 100644
index 000000000..6a8eab6eb
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -0,0 +1,225 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "context"
+ "crypto"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+const (
+ // TokenTypeBearer is the auth header prefix for bearer tokens.
+ TokenTypeBearer = "Bearer"
+
+ // QuotaProjectEnvVar is the environment variable for setting the quota
+ // project.
+ QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
+ // UniverseDomainEnvVar is the environment variable for setting the default
+ // service domain for a given Cloud universe.
+ UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+ projectEnvVar = "GOOGLE_CLOUD_PROJECT"
+ maxBodySize = 1 << 20
+
+ // DefaultUniverseDomain is the default value for universe domain.
+ // Universe domain is the default service domain for a given Cloud universe.
+ DefaultUniverseDomain = "googleapis.com"
+)
+
+type clonableTransport interface {
+ Clone() *http.Transport
+}
+
+// DefaultClient returns an [http.Client] with some defaults set. If
+// the current [http.DefaultTransport] is a [clonableTransport], as
+// is the case for an [*http.Transport], the clone will be used.
+// Otherwise the [http.DefaultTransport] is used directly.
+func DefaultClient() *http.Client {
+ if transport, ok := http.DefaultTransport.(clonableTransport); ok {
+ return &http.Client{
+ Transport: transport.Clone(),
+ Timeout: 30 * time.Second,
+ }
+ }
+
+ return &http.Client{
+ Transport: http.DefaultTransport,
+ Timeout: 30 * time.Second,
+ }
+}
+
+// ParseKey converts the binary contents of a private key file
+// to a crypto.Signer. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (crypto.Signer, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ var parsedKey crypto.PrivateKey
+ var err error
+ parsedKey, err = x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
+ }
+ }
+ parsed, ok := parsedKey.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("private key is not a signer")
+ }
+ return parsed, nil
+}
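+
+// An illustrative sketch (keyPEM holds a PEM-encoded, passphrase-free RSA
+// private key and digest is a SHA-256 digest; both are placeholders):
+//
+//	signer, err := ParseKey(keyPEM)
+//	if err != nil {
+//		// handle error
+//	}
+//	sig, err := signer.Sign(rand.Reader, digest, crypto.SHA256)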
+
+// GetQuotaProject retrieves quota project with precedence being: override,
+// environment variable, creds json file.
+func GetQuotaProject(b []byte, override string) string {
+ if override != "" {
+ return override
+ }
+ if env := os.Getenv(QuotaProjectEnvVar); env != "" {
+ return env
+ }
+ if b == nil {
+ return ""
+ }
+ var v struct {
+ QuotaProject string `json:"quota_project_id"`
+ }
+ if err := json.Unmarshal(b, &v); err != nil {
+ return ""
+ }
+ return v.QuotaProject
+}
+
+// GetProjectID retrieves project with precedence being: override,
+// environment variable, creds json file.
+func GetProjectID(b []byte, override string) string {
+ if override != "" {
+ return override
+ }
+ if env := os.Getenv(projectEnvVar); env != "" {
+ return env
+ }
+ if b == nil {
+ return ""
+ }
+ var v struct {
+ ProjectID string `json:"project_id"` // standard service account key
+ Project string `json:"project"` // gdch key
+ }
+ if err := json.Unmarshal(b, &v); err != nil {
+ return ""
+ }
+ if v.ProjectID != "" {
+ return v.ProjectID
+ }
+ return v.Project
+}
+
+// DoRequest executes the provided req with the client. It reads the response
+// body, closes it, and returns it.
+func DoRequest(client *http.Client, req *http.Request) (*http.Response, []byte, error) {
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+ body, err := ReadAll(io.LimitReader(resp.Body, maxBodySize))
+ if err != nil {
+ return nil, nil, err
+ }
+ return resp, body, nil
+}
+
+// ReadAll consumes the whole reader, capping the bytes read at maxBodySize to
+// guard against oversized responses.
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(io.LimitReader(r, maxBodySize))
+}
+
+// StaticCredentialsProperty is a helper for creating static credentials
+// properties.
+func StaticCredentialsProperty(s string) StaticProperty {
+ return StaticProperty(s)
+}
+
+// StaticProperty always returns the value of the underlying string.
+type StaticProperty string
+
+// GetProperty returns the static property value; the given context is unused.
+func (p StaticProperty) GetProperty(context.Context) (string, error) {
+ return string(p), nil
+}
+
+// ComputeUniverseDomainProvider fetches the credentials universe domain from
+// the Google Cloud metadata service.
+type ComputeUniverseDomainProvider struct {
+ MetadataClient *metadata.Client
+ universeDomainOnce sync.Once
+ universeDomain string
+ universeDomainErr error
+}
+
+// GetProperty fetches the credentials universe domain from the Google Cloud
+// metadata service.
+func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
+ c.universeDomainOnce.Do(func() {
+ c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient)
+ })
+ if c.universeDomainErr != nil {
+ return "", c.universeDomainErr
+ }
+ return c.universeDomain, nil
+}
+
+// httpGetMetadataUniverseDomain is a package var for unit test substitution.
+var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) {
+ ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
+ defer cancel()
+ return client.GetWithContext(ctx, "universe/universe-domain")
+}
+
+func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) {
+ universeDomain, err := httpGetMetadataUniverseDomain(ctx, client)
+ if err == nil {
+ return universeDomain, nil
+ }
+ if _, ok := err.(metadata.NotDefinedError); ok {
+ // http.StatusNotFound (404)
+ return DefaultUniverseDomain, nil
+ }
+ return "", err
+}
+
+// FormatIAMServiceAccountResource sets a service account name in an IAM resource
+// name.
+func FormatIAMServiceAccountResource(name string) string {
+ return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
+}
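+
+// For example, FormatIAMServiceAccountResource("sa@example.iam.gserviceaccount.com")
+// returns "projects/-/serviceAccounts/sa@example.iam.gserviceaccount.com".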
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
new file mode 100644
index 000000000..9bd55f510
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -0,0 +1,171 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jwt
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+const (
+ // HeaderAlgRSA256 is the RS256 [Header.Algorithm].
+ HeaderAlgRSA256 = "RS256"
+ // HeaderAlgES256 is the ES256 [Header.Algorithm].
+ HeaderAlgES256 = "ES256"
+ // HeaderType is the standard [Header.Type].
+ HeaderType = "JWT"
+)
+
+// Header represents a JWT header.
+type Header struct {
+ Algorithm string `json:"alg"`
+ Type string `json:"typ"`
+ KeyID string `json:"kid"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Claims represents the claims set of a JWT.
+type Claims struct {
+ // Iss is the issuer JWT claim.
+ Iss string `json:"iss"`
+ // Scope is the scope JWT claim.
+ Scope string `json:"scope,omitempty"`
+ // Exp is the expiry JWT claim. If unset, default is in one hour from now.
+ Exp int64 `json:"exp"`
+ // Iat is the subject issued at claim. If unset, default is now.
+ Iat int64 `json:"iat"`
+ // Aud is the audience JWT claim. Optional.
+ Aud string `json:"aud"`
+ // Sub is the subject JWT claim. Optional.
+ Sub string `json:"sub,omitempty"`
+ // AdditionalClaims contains any additional non-standard JWT claims. Optional.
+ AdditionalClaims map[string]interface{} `json:"-"`
+}
+
+func (c *Claims) encode() (string, error) {
+ // Compensate for skew
+ now := time.Now().Add(-10 * time.Second)
+ if c.Iat == 0 {
+ c.Iat = now.Unix()
+ }
+ if c.Exp == 0 {
+ c.Exp = now.Add(time.Hour).Unix()
+ }
+ if c.Exp < c.Iat {
+ return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
+ }
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.AdditionalClaims) == 0 {
+ return base64.RawURLEncoding.EncodeToString(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.AdditionalClaims)
+ if err != nil {
+ return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// EncodeJWS encodes the data using the provided key as a JSON web signature.
+func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ claims, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, claims)
+ h := sha256.New()
+ h.Write([]byte(ss))
+ sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
+}
+
+// DecodeJWS decodes a claim set from a JWS payload.
+func DecodeJWS(payload string) (*Claims, error) {
+ // decode returned id token to get expiry
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ return nil, errors.New("invalid token received")
+ }
+ decoded, err := base64.RawURLEncoding.DecodeString(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &Claims{}
+ if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil {
+ return nil, err
+ }
+ return c, err
+}
+
+// VerifyJWS tests whether the provided JWT token's signature was produced by
+// the private key associated with the provided public key.
+func VerifyJWS(token string, key *rsa.PublicKey) error {
+ parts := strings.Split(token, ".")
+ if len(parts) != 3 {
+ return errors.New("jwt: invalid token received, token must have 3 parts")
+ }
+
+ signedContent := parts[0] + "." + parts[1]
+ signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
+ if err != nil {
+ return err
+ }
+
+ h := sha256.New()
+ h.Write([]byte(signedContent))
+ return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString)
+}
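
EncodeJWS and VerifyJWS implement plain RS256: the signing input is base64url(header) + "." + base64url(claims), signed with RSASSA-PKCS1-v1_5 over a SHA-256 digest. A self-contained round-trip sketch of the same steps using only the standard library; the header and claims payloads are illustrative:

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	head := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"RS256","typ":"JWT"}`))
	claims := base64.RawURLEncoding.EncodeToString([]byte(`{"iss":"sa@example.iam.gserviceaccount.com"}`))
	ss := head + "." + claims

	// Sign, as in EncodeJWS: *rsa.PrivateKey implements crypto.Signer.
	digest := sha256.Sum256([]byte(ss))
	sig, err := key.Sign(rand.Reader, digest[:], crypto.SHA256)
	if err != nil {
		panic(err)
	}
	token := ss + "." + base64.RawURLEncoding.EncodeToString(sig)

	// Verify, walking the same steps as VerifyJWS.
	parts := strings.Split(token, ".")
	signed := parts[0] + "." + parts[1]
	rawSig, _ := base64.RawURLEncoding.DecodeString(parts[2])
	d := sha256.Sum256([]byte(signed))
	fmt.Println(rsa.VerifyPKCS1v15(&key.PublicKey, crypto.SHA256, d[:], rawSig) == nil) // true
}
```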
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
new file mode 100644
index 000000000..b1f0fcf93
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -0,0 +1,385 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "log"
+ "log/slog"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+
+ "cloud.google.com/go/auth/internal"
+ "cloud.google.com/go/auth/internal/transport/cert"
+ "github.com/google/s2a-go"
+ "github.com/google/s2a-go/fallback"
+ "google.golang.org/grpc/credentials"
+)
+
+const (
+ mTLSModeAlways = "always"
+ mTLSModeNever = "never"
+ mTLSModeAuto = "auto"
+
+ // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
+ googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
+ googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
+ googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT"
+ googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
+
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+
+ mtlsMDSRoot = "/run/google-mds-mtls/root.crt"
+ mtlsMDSKey = "/run/google-mds-mtls/client.key"
+)
+
+// Type represents the type of transport used.
+type Type int
+
+const (
+ // TransportTypeUnknown represents an unknown transport type and is the default option.
+ TransportTypeUnknown Type = iota
+ // TransportTypeMTLSS2A represents the mTLS transport type using S2A.
+ TransportTypeMTLSS2A
+)
+
+// Options duplicates information from the individual transport packages in
+// order to avoid cyclic deps. It correlates 1:1 with fields on
+// httptransport.Options and grpctransport.Options.
+type Options struct {
+ Endpoint string
+ DefaultEndpointTemplate string
+ DefaultMTLSEndpoint string
+ ClientCertProvider cert.Provider
+ Client *http.Client
+ UniverseDomain string
+ EnableDirectPath bool
+ EnableDirectPathXds bool
+ Logger *slog.Logger
+}
+
+// getUniverseDomain returns the default service domain for a given Cloud
+// universe.
+func (o *Options) getUniverseDomain() string {
+ if o.UniverseDomain == "" {
+ return internal.DefaultUniverseDomain
+ }
+ return o.UniverseDomain
+}
+
+// isUniverseDomainGDU returns true if the universe domain is the default Google
+// universe.
+func (o *Options) isUniverseDomainGDU() bool {
+ return o.getUniverseDomain() == internal.DefaultUniverseDomain
+}
+
+// defaultEndpoint returns the DefaultEndpointTemplate merged with the
+// universe domain if the DefaultEndpointTemplate is set, otherwise returns an
+// empty string.
+func (o *Options) defaultEndpoint() string {
+ if o.DefaultEndpointTemplate == "" {
+ return ""
+ }
+ return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
+// defaultMTLSEndpoint returns the DefaultMTLSEndpoint merged with the
+// universe domain if DefaultMTLSEndpoint is set, otherwise returns an
+// empty string.
+func (o *Options) defaultMTLSEndpoint() string {
+ if o.DefaultMTLSEndpoint == "" {
+ return ""
+ }
+ return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
+// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
+// default endpoint.
+func (o *Options) mergedEndpoint() (string, error) {
+ defaultEndpoint := o.defaultEndpoint()
+ u, err := url.Parse(fixScheme(defaultEndpoint))
+ if err != nil {
+ return "", err
+ }
+ return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil
+}
+
+func fixScheme(baseURL string) string {
+ if !strings.Contains(baseURL, "://") {
+ baseURL = "https://" + baseURL
+ }
+ return baseURL
+}
+
+// GRPCTransportCredentials embeds interface TransportCredentials with additional data.
+type GRPCTransportCredentials struct {
+ credentials.TransportCredentials
+ Endpoint string
+ TransportType Type
+}
+
+// GetGRPCTransportCredsAndEndpoint returns an instance of
+// [google.golang.org/grpc/credentials.TransportCredentials], and the
+// corresponding endpoint and transport type to use for GRPC client.
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) {
+ config, err := getTransportConfig(opts)
+ if err != nil {
+ return nil, err
+ }
+
+ defaultTransportCreds := credentials.NewTLS(&tls.Config{
+ GetClientCertificate: config.clientCertSource,
+ })
+
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+ }
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+
+ var fallbackOpts *s2a.FallbackOptions
+ // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
+ if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
+ fallbackOpts = &s2a.FallbackOptions{
+ FallbackClientHandshakeFunc: fallbackHandshake,
+ }
+ }
+
+ s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
+ FallbackOpts: fallbackOpts,
+ })
+ if err != nil {
+ // Use default if we cannot initialize S2A client transport credentials.
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
+ return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil
+}
+
+// GetHTTPTransportConfig returns a client certificate source and a function for
+// dialing MTLS with S2A.
+func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) {
+ config, err := getTransportConfig(opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
+ }
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
+ }
+
+ var fallbackOpts *s2a.FallbackOptions
+ // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
+ if fallbackURL, err := url.Parse(config.endpoint); err == nil {
+ if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil {
+ fallbackOpts = &s2a.FallbackOptions{
+ FallbackDialer: &s2a.FallbackDialer{
+ Dialer: fallbackDialer,
+ ServerAddr: fallbackServerAddr,
+ },
+ }
+ }
+ }
+
+ dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
+ FallbackOpts: fallbackOpts,
+ })
+ return nil, dialTLSContextFunc, nil
+}
+
+func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) {
+ rootPEM, err := os.ReadFile(mtlsMDSRootFile)
+ if err != nil {
+ return nil, err
+ }
+ caCertPool := x509.NewCertPool()
+ ok := caCertPool.AppendCertsFromPEM(rootPEM)
+ if !ok {
+ return nil, errors.New("failed to load MTLS MDS root certificate")
+ }
+ // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain
+ // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the
+ // tls.X509KeyPair function as both the certificate chain and private key arguments.
+ cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := tls.Config{
+ RootCAs: caCertPool,
+ Certificates: []tls.Certificate{cert},
+ MinVersion: tls.VersionTLS13,
+ }
+ return credentials.NewTLS(&tlsConfig), nil
+}
+
+func getTransportConfig(opts *Options) (*transportConfig, error) {
+ clientCertSource, err := GetClientCertificateProvider(opts)
+ if err != nil {
+ return nil, err
+ }
+ endpoint, err := getEndpoint(opts, clientCertSource)
+ if err != nil {
+ return nil, err
+ }
+ defaultTransportConfig := transportConfig{
+ clientCertSource: clientCertSource,
+ endpoint: endpoint,
+ }
+
+ if !shouldUseS2A(clientCertSource, opts) {
+ return &defaultTransportConfig, nil
+ }
+
+ s2aAddress := GetS2AAddress(opts.Logger)
+ mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger)
+ if s2aAddress == "" && mtlsS2AAddress == "" {
+ return &defaultTransportConfig, nil
+ }
+ return &transportConfig{
+ clientCertSource: clientCertSource,
+ endpoint: endpoint,
+ s2aAddress: s2aAddress,
+ mtlsS2AAddress: mtlsS2AAddress,
+ s2aMTLSEndpoint: opts.defaultMTLSEndpoint(),
+ }, nil
+}
+
+// GetClientCertificateProvider returns a default client certificate source, if
+// not provided by the user.
+//
+// A nil default source can be returned if the source does not exist. Any errors
+// encountered while initializing the default source will be reported as a
+// client error (ex. corrupt metadata file).
+func GetClientCertificateProvider(opts *Options) (cert.Provider, error) {
+ if !isClientCertificateEnabled(opts) {
+ return nil, nil
+ } else if opts.ClientCertProvider != nil {
+ return opts.ClientCertProvider, nil
+ }
+ return cert.DefaultProvider()
+}
+
+// isClientCertificateEnabled returns true by default for the GDU universe domain, unless explicitly overridden by env var.
+func isClientCertificateEnabled(opts *Options) bool {
+ if value, ok := os.LookupEnv(googleAPIUseCertSource); ok {
+ // error as false is OK
+ b, _ := strconv.ParseBool(value)
+ return b
+ }
+ return opts.isUniverseDomainGDU()
+}
+
+type transportConfig struct {
+ // The client certificate source.
+ clientCertSource cert.Provider
+ // The corresponding endpoint to use based on client certificate source.
+ endpoint string
+ // The plaintext S2A address if it can be used, otherwise an empty string.
+ s2aAddress string
+ // The MTLS S2A address if it can be used, otherwise an empty string.
+ mtlsS2AAddress string
+ // The MTLS endpoint to use with S2A.
+ s2aMTLSEndpoint string
+}
+
+// getEndpoint returns the endpoint for the service, taking into account the
+// user-provided endpoint override "settings.Endpoint".
+//
+// If no endpoint override is specified, we will either return the default
+// endpoint or the default mTLS endpoint if a client certificate is available.
+//
+// You can override the default endpoint choice (mTLS vs. regular) by setting
+// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
+//
+// If the endpoint override is an address (host:port) rather than full base
+// URL (ex. https://...), then the user-provided address will be merged into
+// the default endpoint. For example, WithEndpoint("myhost:8000") and
+// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return
+// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS
+// endpoint.
+func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
+ if opts.Endpoint == "" {
+ mtlsMode := getMTLSMode()
+ if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
+ return opts.defaultMTLSEndpoint(), nil
+ }
+ return opts.defaultEndpoint(), nil
+ }
+ if strings.Contains(opts.Endpoint, "://") {
+ // User passed in a full URL path, use it verbatim.
+ return opts.Endpoint, nil
+ }
+ if opts.defaultEndpoint() == "" {
+ // If DefaultEndpointTemplate is not configured,
+ // use the user provided endpoint verbatim. This allows a naked
+ // "host[:port]" URL to be used with GRPC Direct Path.
+ return opts.Endpoint, nil
+ }
+
+ // Assume user-provided endpoint is host[:port], merge it with the default endpoint.
+ return opts.mergedEndpoint()
+}
+
+func getMTLSMode() string {
+ mode := os.Getenv(googleAPIUseMTLS)
+ if mode == "" {
+ mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated.
+ }
+ if mode == "" {
+ return mTLSModeAuto
+ }
+ return strings.ToLower(mode)
+}
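
The endpoint logic above is the subtle part of this file: the universe domain is substituted into the endpoint template, a full-URL override is used verbatim, and a bare host[:port] override replaces only the host of the default endpoint. A standalone sketch of that merge; mergeEndpoint is an illustrative name, not the package API:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// mergeEndpoint mirrors defaultEndpoint plus mergedEndpoint above.
func mergeEndpoint(template, universeDomain, override string) (string, error) {
	def := strings.Replace(template, "UNIVERSE_DOMAIN", universeDomain, 1)
	if override == "" {
		return def, nil // no override: use the merged default
	}
	if strings.Contains(override, "://") {
		return override, nil // full URL: used verbatim
	}
	u, err := url.Parse(def)
	if err != nil {
		return "", err
	}
	// host[:port] override: replace only the host of the default endpoint.
	return strings.Replace(def, u.Host, override, 1), nil
}

func main() {
	ep, _ := mergeEndpoint("https://UNIVERSE_DOMAIN/bar/baz", "googleapis.com", "myhost:8000")
	fmt.Println(ep) // https://myhost:8000/bar/baz
}
```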
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
new file mode 100644
index 000000000..5cedc50f1
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
@@ -0,0 +1,64 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "errors"
+ "sync"
+)
+
+// defaultCertData holds all the variables pertaining to
+// the default certificate provider created by [DefaultProvider].
+//
+// A singleton model is used to allow the provider to be reused
+// by the transport layer. As mentioned in [DefaultProvider], (nil, nil)
+// may be returned to indicate a default provider could not be found, which
+// will skip extra TLS config in the transport layer.
+type defaultCertData struct {
+ once sync.Once
+ provider Provider
+ err error
+}
+
+var (
+ defaultCert defaultCertData
+)
+
+// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate.
+type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
+var errSourceUnavailable = errors.New("certificate source is unavailable")
+
+// DefaultProvider returns a certificate source, trying the workload X.509
+// source first, then the EnterpriseCertificateProxySource, and finally the
+// legacy SecureConnectSource.
+//
+// If no source is available (due to missing configurations), a nil Provider and a nil error are returned to indicate that a default certificate source is unavailable.
+func DefaultProvider() (Provider, error) {
+ defaultCert.once.Do(func() {
+ defaultCert.provider, defaultCert.err = NewWorkloadX509CertProvider("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.provider, defaultCert.err = NewSecureConnectProvider("")
+ if errors.Is(defaultCert.err, errSourceUnavailable) {
+ defaultCert.provider, defaultCert.err = nil, nil
+ }
+ }
+ }
+ })
+ return defaultCert.provider, defaultCert.err
+}
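
A Provider is just a func(*tls.CertificateRequestInfo) (*tls.Certificate, error), which is exactly the signature of tls.Config.GetClientCertificate, so whatever DefaultProvider resolves plugs straight into a client TLS config. A sketch with a stub provider; since this cert package is internal to the module, external code would supply its own func of the same shape:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Stub standing in for a resolved provider (ECP, SecureConnect, or
	// workload cert files in the real implementations above).
	provider := func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
		return &tls.Certificate{}, nil
	}
	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{GetClientCertificate: provider},
		},
	}
	fmt.Println(client.Transport != nil)
}
```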
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
new file mode 100644
index 000000000..6c954ae19
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
@@ -0,0 +1,54 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client"
+)
+
+type ecpSource struct {
+ key *client.Key
+}
+
+// NewEnterpriseCertificateProxyProvider creates a certificate source
+// using the Enterprise Certificate Proxy client, which delegates
+// certificate-related operations to an OS-specific "signer binary"
+// that communicates with the native keystore (ex. Keychain on macOS).
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate issuer and the location of the signer binary.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
+ key, err := client.Cred(configFilePath)
+ if err != nil {
+ // TODO(codyoss): once this is fixed upstream we can handle this error a
+ // little better here. But be safe for now and assume unavailable.
+ return nil, errSourceUnavailable
+ }
+
+ return (&ecpSource{
+ key: key,
+ }).getClientCertificate, nil
+}
+
+func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ var cert tls.Certificate
+ cert.PrivateKey = s.key
+ cert.Certificate = s.key.CertificateChain()
+ return &cert, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
new file mode 100644
index 000000000..738cb2161
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
@@ -0,0 +1,124 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+const (
+ metadataPath = ".secureConnect"
+ metadataFile = "context_aware_metadata.json"
+)
+
+type secureConnectSource struct {
+ metadata secureConnectMetadata
+
+ // Cache the cert to avoid executing helper command repeatedly.
+ cachedCertMutex sync.Mutex
+ cachedCert *tls.Certificate
+}
+
+type secureConnectMetadata struct {
+ Cmd []string `json:"cert_provider_command"`
+}
+
+// NewSecureConnectProvider creates a certificate source using
+// the Secure Connect Helper and its associated metadata file.
+//
+// The configFilePath points to the location of the context aware metadata file.
+// If configFilePath is empty, use the default context aware metadata location.
+func NewSecureConnectProvider(configFilePath string) (Provider, error) {
+ if configFilePath == "" {
+ user, err := user.Current()
+ if err != nil {
+ // Error locating the default config means Secure Connect is not supported.
+ return nil, errSourceUnavailable
+ }
+ configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile)
+ }
+
+ file, err := os.ReadFile(configFilePath)
+ if err != nil {
+ // Config file missing means Secure Connect is not supported.
+ // There are non-os.ErrNotExist errors that may be returned.
+ // (e.g. if the home directory is /dev/null, *nix systems will
+ // return ENOTDIR instead of ENOENT)
+ return nil, errSourceUnavailable
+ }
+
+ var metadata secureConnectMetadata
+ if err := json.Unmarshal(file, &metadata); err != nil {
+ return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err)
+ }
+ if err := validateMetadata(metadata); err != nil {
+ return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err)
+ }
+ return (&secureConnectSource{
+ metadata: metadata,
+ }).getClientCertificate, nil
+}
+
+func validateMetadata(metadata secureConnectMetadata) error {
+ if len(metadata.Cmd) == 0 {
+ return errors.New("empty cert_provider_command")
+ }
+ return nil
+}
+
+func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ s.cachedCertMutex.Lock()
+ defer s.cachedCertMutex.Unlock()
+ if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) {
+ return s.cachedCert, nil
+ }
+ // Expand OS environment variables in the cert provider command such as "$HOME".
+ for i := 0; i < len(s.metadata.Cmd); i++ {
+ s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
+ }
+ command := s.metadata.Cmd
+ data, err := exec.Command(command[0], command[1:]...).Output()
+ if err != nil {
+ return nil, err
+ }
+ cert, err := tls.X509KeyPair(data, data)
+ if err != nil {
+ return nil, err
+ }
+ s.cachedCert = &cert
+ return &cert, nil
+}
+
+// isCertificateExpired returns true if the given cert is expired or invalid.
+func isCertificateExpired(cert *tls.Certificate) bool {
+ if len(cert.Certificate) == 0 {
+ return true
+ }
+ parsed, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ return true
+ }
+ return time.Now().After(parsed.NotAfter)
+}
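
The metadata file read above is expected to carry a cert_provider_command array naming a helper binary whose stdout is a concatenated PEM certificate and key (hence the tls.X509KeyPair(data, data) call). A sketch parsing an illustrative payload the same way; the helper path is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"cert_provider_command":["/usr/local/bin/cert_helper","--print_certificate"]}`)
	var md struct {
		Cmd []string `json:"cert_provider_command"`
	}
	if err := json.Unmarshal(raw, &md); err != nil || len(md.Cmd) == 0 {
		fmt.Println("Secure Connect unavailable")
		return
	}
	fmt.Println(md.Cmd) // command argv that emits the PEM cert+key pair
}
```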
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
new file mode 100644
index 000000000..347aaced7
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
@@ -0,0 +1,114 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cert
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "os"
+
+ "github.com/googleapis/enterprise-certificate-proxy/client/util"
+)
+
+type certConfigs struct {
+ Workload *workloadSource `json:"workload"`
+}
+
+type workloadSource struct {
+ CertPath string `json:"cert_path"`
+ KeyPath string `json:"key_path"`
+}
+
+type certificateConfig struct {
+ CertConfigs certConfigs `json:"cert_configs"`
+}
+
+// NewWorkloadX509CertProvider creates a certificate source
+// that reads a certificate and private key file from the local file system.
+// This is intended to be used for workload identity federation.
+//
+// The configFilePath points to a config file containing relevant parameters
+// such as the certificate and key file paths.
+// If configFilePath is empty, the client will attempt to load the config from
+// a well-known gcloud location.
+func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) {
+ if configFilePath == "" {
+ envFilePath := util.GetConfigFilePathFromEnv()
+ if envFilePath != "" {
+ configFilePath = envFilePath
+ } else {
+ configFilePath = util.GetDefaultConfigFilePath()
+ }
+ }
+
+ certFile, keyFile, err := getCertAndKeyFiles(configFilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ source := &workloadSource{
+ CertPath: certFile,
+ KeyPath: keyFile,
+ }
+ return source.getClientCertificate, nil
+}
+
+// getClientCertificate attempts to load the certificate and key from the files specified in the
+// certificate config.
+func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath)
+ if err != nil {
+ return nil, err
+ }
+ return &cert, nil
+}
+
+// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private
+// key file paths.
+func getCertAndKeyFiles(configFilePath string) (string, string, error) {
+ jsonFile, err := os.Open(configFilePath)
+ if err != nil {
+ return "", "", errSourceUnavailable
+ }
+
+ byteValue, err := io.ReadAll(jsonFile)
+ if err != nil {
+ return "", "", err
+ }
+
+ var config certificateConfig
+ if err := json.Unmarshal(byteValue, &config); err != nil {
+ return "", "", err
+ }
+
+ if config.CertConfigs.Workload == nil {
+ return "", "", errSourceUnavailable
+ }
+
+ certFile := config.CertConfigs.Workload.CertPath
+ keyFile := config.CertConfigs.Workload.KeyPath
+
+ if certFile == "" {
+ return "", "", errors.New("certificate configuration is missing the certificate file location")
+ }
+
+ if keyFile == "" {
+ return "", "", errors.New("certificate configuration is missing the key file location")
+ }
+
+ return certFile, keyFile, nil
+}
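
For reference, a config file that satisfies getCertAndKeyFiles would look like the JSON below; this sketch parses it with the same struct shape as the code above. The paths are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{
	  "cert_configs": {
	    "workload": {"cert_path": "/path/to/cert.pem", "key_path": "/path/to/key.pem"}
	  }
	}`)
	var cfg struct {
		CertConfigs struct {
			Workload *struct {
				CertPath string `json:"cert_path"`
				KeyPath  string `json:"key_path"`
			} `json:"workload"`
		} `json:"cert_configs"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil || cfg.CertConfigs.Workload == nil {
		fmt.Println("workload certs unavailable")
		return
	}
	fmt.Println(cfg.CertConfigs.Workload.CertPath, cfg.CertConfigs.Workload.KeyPath)
}
```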
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
new file mode 100644
index 000000000..a63309956
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -0,0 +1,138 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "log/slog"
+ "os"
+ "strconv"
+ "sync"
+
+ "cloud.google.com/go/auth/internal/transport/cert"
+ "cloud.google.com/go/compute/metadata"
+)
+
+const (
+ configEndpointSuffix = "instance/platform-security/auto-mtls-configuration"
+)
+
+var (
+ mtlsConfiguration *mtlsConfig
+
+ mtlsOnce sync.Once
+)
+
+// GetS2AAddress returns the S2A address to be reached via plaintext connection.
+// Returns empty string if not set or invalid.
+func GetS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
+ return ""
+ }
+ return mtlsConfiguration.S2A.PlaintextAddress
+}
+
+// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
+// Returns empty string if not set or invalid.
+func GetMTLSS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
+ return ""
+ }
+ return mtlsConfiguration.S2A.MTLSAddress
+}
+
+// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
+type mtlsConfig struct {
+ S2A *s2aAddresses `json:"s2a"`
+}
+
+func (c *mtlsConfig) valid() bool {
+ return c != nil && c.S2A != nil
+}
+
+// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
+type s2aAddresses struct {
+ // PlaintextAddress is the plaintext address to reach S2A
+ PlaintextAddress string `json:"plaintext_address"`
+ // MTLSAddress is the MTLS address to reach S2A
+ MTLSAddress string `json:"mtls_address"`
+}
+
+func getMetadataMTLSAutoConfig(logger *slog.Logger) {
+ var err error
+ mtlsOnce.Do(func() {
+ mtlsConfiguration, err = queryConfig(logger)
+ if err != nil {
+ log.Printf("Getting MTLS config failed: %v", err)
+ }
+ })
+}
+
+var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: logger,
+ })
+ return metadataClient.GetWithContext(context.Background(), configEndpointSuffix)
+}
+
+func queryConfig(logger *slog.Logger) (*mtlsConfig, error) {
+ resp, err := httpGetMetadataMTLSConfig(logger)
+ if err != nil {
+ return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
+ }
+ var config mtlsConfig
+ err = json.Unmarshal([]byte(resp), &config)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err)
+ }
+ if config.S2A == nil {
+ return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config)
+ }
+ return &config, nil
+}
+
+func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
+ // If client cert is found, use that over S2A.
+ if clientCertSource != nil {
+ return false
+ }
+ // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
+ if !isGoogleS2AEnabled() {
+ return false
+ }
+ // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A.
+ if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" {
+ return false
+ }
+ // If custom HTTP client is provided, skip S2A.
+ if opts.Client != nil {
+ return false
+ }
+ // If directPath is enabled, skip S2A.
+ return !opts.EnableDirectPath && !opts.EnableDirectPathXds
+}
+
+func isGoogleS2AEnabled() bool {
+ b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv))
+ if err != nil {
+ return false
+ }
+ return b
+}
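
queryConfig expects the metadata server's auto-mTLS endpoint to return an s2a object with a plaintext and/or mTLS address. A sketch decoding an illustrative payload with the same struct shape; the addresses are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	resp := `{"s2a":{"plaintext_address":"127.0.0.1:8080","mtls_address":"127.0.0.1:8443"}}`
	var cfg struct {
		S2A *struct {
			PlaintextAddress string `json:"plaintext_address"`
			MTLSAddress      string `json:"mtls_address"`
		} `json:"s2a"`
	}
	if err := json.Unmarshal([]byte(resp), &cfg); err != nil || cfg.S2A == nil {
		fmt.Println("invalid MTLS config")
		return
	}
	fmt.Println(cfg.S2A.PlaintextAddress, cfg.S2A.MTLSAddress)
}
```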
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
new file mode 100644
index 000000000..5c8721efa
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -0,0 +1,107 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides internal helpers for the two transport packages
+// (grpctransport and httptransport).
+package transport
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "time"
+
+ "cloud.google.com/go/auth/credentials"
+)
+
+// CloneDetectOptions clones a user set detect option into some new memory that
+// we can internally manipulate before sending onto the detect package.
+func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions {
+ if oldDo == nil {
+ // It is valid for users not to set this, but we will need to default
+ // some options for them in this case, so return some initialized memory
+ // to work with.
+ return &credentials.DetectOptions{}
+ }
+ newDo := &credentials.DetectOptions{
+ // Simple types
+ TokenBindingType: oldDo.TokenBindingType,
+ Audience: oldDo.Audience,
+ Subject: oldDo.Subject,
+ EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
+ TokenURL: oldDo.TokenURL,
+ STSAudience: oldDo.STSAudience,
+ CredentialsFile: oldDo.CredentialsFile,
+ UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
+ UniverseDomain: oldDo.UniverseDomain,
+
+ // These fields are pointer types that we just want to use exactly as
+ // the user set, copy the ref
+ Client: oldDo.Client,
+ Logger: oldDo.Logger,
+ AuthHandlerOptions: oldDo.AuthHandlerOptions,
+ }
+
+ // Smartly size this memory and copy below.
+ if len(oldDo.CredentialsJSON) > 0 {
+ newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON))
+ copy(newDo.CredentialsJSON, oldDo.CredentialsJSON)
+ }
+ if len(oldDo.Scopes) > 0 {
+ newDo.Scopes = make([]string, len(oldDo.Scopes))
+ copy(newDo.Scopes, oldDo.Scopes)
+ }
+
+ return newDo
+}
+
+// ValidateUniverseDomain verifies that the universe domain configured for the
+// client matches the universe domain configured for the credentials.
+func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error {
+ if clientUniverseDomain != credentialsUniverseDomain {
+ return fmt.Errorf(
+ "the configured universe domain (%q) does not match the universe "+
+ "domain found in the credentials (%q). If you haven't configured "+
+ "the universe domain explicitly, \"googleapis.com\" is the default",
+ clientUniverseDomain,
+ credentialsUniverseDomain)
+ }
+ return nil
+}
+
+// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS.
+func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
+ trans := BaseTransport()
+ trans.TLSClientConfig = tlsConfig
+ return &http.Client{Transport: trans}
+}
+
+// BaseTransport returns a default [http.Transport] which can be used if
+// [http.DefaultTransport] has been overwritten.
+func BaseTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ MaxIdleConnsPerHost: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
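
BaseTransport exists because mutating the shared http.DefaultTransport (for example, to attach a TLS config) would affect every client in the process; DefaultHTTPClientWithTLS attaches the config to a fresh transport instead. Since this package is internal, the sketch below re-creates the same construction locally; the TLS config is illustrative:

```go
package main

import (
	"crypto/tls"
	"net"
	"net/http"
	"time"
)

func main() {
	// A fresh transport, never the shared http.DefaultTransport.
	trans := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		MaxIdleConns:          100,
		MaxIdleConnsPerHost:   100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	trans.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS12} // illustrative config
	client := &http.Client{Transport: trans}
	_ = client
}
```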
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
new file mode 100644
index 000000000..42716752e
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
@@ -0,0 +1,82 @@
+# Changelog
+
+## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
+
+## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161)
+
+## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52))
+
+## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+
+## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800)
+
+## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16)
+
+
+### Features
+
+* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
+
+## 0.1.0 (2023-10-19)
+
+
+### Features
+
+* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f))
+* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
new file mode 100644
index 000000000..9cc33e5ee
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
@@ -0,0 +1,200 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oauth2adapt helps convert types used in [cloud.google.com/go/auth]
+// and [golang.org/x/oauth2].
+package oauth2adapt
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+
+ "cloud.google.com/go/auth"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+)
+
+const (
+ oauth2TokenSourceKey = "oauth2.google.tokenSource"
+ oauth2ServiceAccountKey = "oauth2.google.serviceAccount"
+ authTokenSourceKey = "auth.google.tokenSource"
+ authServiceAccountKey = "auth.google.serviceAccount"
+)
+
+// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
+// into a [cloud.google.com/go/auth.TokenProvider].
+func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
+ return &tokenProviderAdapter{ts: ts}
+}
+
+type tokenProviderAdapter struct {
+ ts oauth2.TokenSource
+}
+
+// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It
+// is a light wrapper around the underlying TokenSource.
+func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
+ tok, err := tp.ts.Token()
+ if err != nil {
+ var err2 *oauth2.RetrieveError
+ if ok := errors.As(err, &err2); ok {
+ return nil, AuthErrorFromRetrieveError(err2)
+ }
+ return nil, err
+ }
+ // Preserve compute token metadata, for both types of tokens.
+ metadata := map[string]interface{}{}
+ if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok {
+ metadata[authTokenSourceKey] = val
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok {
+ metadata[authServiceAccountKey] = val
+ metadata[oauth2ServiceAccountKey] = val
+ }
+ return &auth.Token{
+ Value: tok.AccessToken,
+ Type: tok.Type(),
+ Expiry: tok.Expiry,
+ Metadata: metadata,
+ }, nil
+}
+
+// TokenSourceFromTokenProvider converts any
+// [cloud.google.com/go/auth.TokenProvider] into a
+// [golang.org/x/oauth2.TokenSource].
+func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource {
+ return &tokenSourceAdapter{tp: tp}
+}
+
+type tokenSourceAdapter struct {
+ tp auth.TokenProvider
+}
+
+// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It
+// is a light wrapper around the underlying TokenProvider.
+func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
+ tok, err := ts.tp.Token(context.Background())
+ if err != nil {
+ var err2 *auth.Error
+ if ok := errors.As(err, &err2); ok {
+ return nil, AddRetrieveErrorToAuthError(err2)
+ }
+ return nil, err
+ }
+ tok2 := &oauth2.Token{
+ AccessToken: tok.Value,
+ TokenType: tok.Type,
+ Expiry: tok.Expiry,
+ }
+ // Preserve token metadata.
+ m := tok.Metadata
+ if m != nil {
+ // Copy map to avoid concurrent map writes error (#11161).
+ metadata := make(map[string]interface{}, len(m)+2)
+ for k, v := range m {
+ metadata[k] = v
+ }
+ // Append compute token metadata in converted form.
+ if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" {
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" {
+ metadata[oauth2ServiceAccountKey] = val
+ }
+ tok2 = tok2.WithExtra(metadata)
+ }
+ return tok2, nil
+}
+
+// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
+// to a [cloud.google.com/go/auth.Credentials].
+func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials {
+ if creds == nil {
+ return nil
+ }
+ return auth.NewCredentials(&auth.CredentialsOptions{
+ TokenProvider: TokenProviderFromTokenSource(creds.TokenSource),
+ JSON: creds.JSON,
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return creds.ProjectID, nil
+ }),
+ UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return creds.GetUniverseDomain()
+ }),
+ })
+}
+
+// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials]
+// to a [golang.org/x/oauth2/google.Credentials].
+func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials {
+ if creds == nil {
+ return nil
+ }
+ // Throw away errors as old credentials are not request aware. Also, no
+ // network requests are currently happening for this use case.
+ projectID, _ := creds.ProjectID(context.Background())
+
+ return &google.Credentials{
+ TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider),
+ ProjectID: projectID,
+ JSON: creds.JSON(),
+ UniverseDomainProvider: func() (string, error) {
+ return creds.UniverseDomain(context.Background())
+ },
+ }
+}
+
+type oauth2Error struct {
+ ErrorCode string `json:"error"`
+ ErrorDescription string `json:"error_description"`
+ ErrorURI string `json:"error_uri"`
+}
+
+// AddRetrieveErrorToAuthError returns the same error provided and adds a
+// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the
+// [cloud.google.com/go/auth.Error].
+func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error {
+ if err == nil {
+ return nil
+ }
+ e := &oauth2.RetrieveError{
+ Response: err.Response,
+ Body: err.Body,
+ }
+ err.Err = e
+ if len(err.Body) > 0 {
+ var oErr oauth2Error
+ // ignore the error as it only fills in extra details
+ json.Unmarshal(err.Body, &oErr)
+ e.ErrorCode = oErr.ErrorCode
+ e.ErrorDescription = oErr.ErrorDescription
+ e.ErrorURI = oErr.ErrorURI
+ }
+ return err
+}
+
+// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that
+// wraps the provided [golang.org/x/oauth2.RetrieveError].
+func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error {
+ if err == nil {
+ return nil
+ }
+ return &auth.Error{
+ Response: err.Response,
+ Body: err.Body,
+ Err: err,
+ }
+}
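Note: the adapters above are symmetric, which makes incremental migration between the two ecosystems possible. A minimal, hedged sketch (not part of the vendored file) of round-tripping a token source; the static token is a placeholder, not a real credential.

    package main

    import (
        "context"
        "fmt"

        "cloud.google.com/go/auth/oauth2adapt"
        "golang.org/x/oauth2"
    )

    func main() {
        // Wrap a legacy oauth2.TokenSource as an auth.TokenProvider.
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder"})
        tp := oauth2adapt.TokenProviderFromTokenSource(ts)

        tok, err := tp.Token(context.Background())
        if err != nil {
            // *oauth2.RetrieveError values arrive converted to *auth.Error.
            fmt.Println("token error:", err)
            return
        }
        fmt.Println("token type:", tok.Type) // "Bearer" for a plain static token

        // And back: any auth.TokenProvider can serve where an
        // oauth2.TokenSource is expected.
        _ = oauth2adapt.TokenSourceFromTokenProvider(tp)
    }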
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go
new file mode 100644
index 000000000..07804dc16
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/threelegged.go
@@ -0,0 +1,382 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
+// OAuth consent at the specified auth code URL and returns an auth code and
+// state upon approval.
+type AuthorizationHandler func(authCodeURL string) (code string, state string, err error)
+
+// Options3LO are the options for doing a 3-legged OAuth2 flow.
+type Options3LO struct {
+ // ClientID is the application's ID.
+ ClientID string
+ // ClientSecret is the application's secret. Not required if AuthHandlerOpts
+ // is set.
+ ClientSecret string
+ // AuthURL is the URL for authenticating.
+ AuthURL string
+ // TokenURL is the URL for retrieving a token.
+ TokenURL string
+ // AuthStyle describes how client info is sent in the token request.
+ AuthStyle Style
+ // RefreshToken is the token used to refresh the credential. Not required
+ // if AuthHandlerOpts is set.
+ RefreshToken string
+ // RedirectURL is the URL to redirect users to. Optional.
+ RedirectURL string
+ // Scopes specifies requested permissions for the Token. Optional.
+ Scopes []string
+
+ // URLParams are the set of values to apply to the token exchange. Optional.
+ URLParams url.Values
+ // Client is the client to be used to make the underlying token requests.
+ // Optional.
+ Client *http.Client
+ // EarlyTokenExpiry is how long before expiry a token should be refreshed.
+ // If not set, the default value is 3 minutes and 45 seconds. Optional.
+ EarlyTokenExpiry time.Duration
+
+ // AuthHandlerOpts provides a set of options for doing a
+ // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
+ AuthHandlerOpts *AuthorizationHandlerOptions
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the logger's configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL, in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
+}
+
+func (o *Options3LO) validate() error {
+ if o == nil {
+ return errors.New("auth: options must be provided")
+ }
+ if o.ClientID == "" {
+ return errors.New("auth: client ID must be provided")
+ }
+ if o.AuthHandlerOpts == nil && o.ClientSecret == "" {
+ return errors.New("auth: client secret must be provided")
+ }
+ if o.AuthURL == "" {
+ return errors.New("auth: auth URL must be provided")
+ }
+ if o.TokenURL == "" {
+ return errors.New("auth: token URL must be provided")
+ }
+ if o.AuthStyle == StyleUnknown {
+ return errors.New("auth: auth style must be provided")
+ }
+ if o.AuthHandlerOpts == nil && o.RefreshToken == "" {
+ return errors.New("auth: refresh token must be provided")
+ }
+ return nil
+}
+
+func (o *Options3LO) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
+// PKCEOptions holds parameters to support PKCE.
+type PKCEOptions struct {
+ // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
+ Challenge string
+ // ChallengeMethod is the encryption method (ex. S256).
+ ChallengeMethod string
+ // Verifier is the original, non-encrypted secret.
+ Verifier string
+}
+
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn int `json:"expires_in"`
+ // error fields
+ ErrorCode string `json:"error"`
+ ErrorDescription string `json:"error_description"`
+ ErrorURI string `json:"error_uri"`
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+func (o *Options3LO) client() *http.Client {
+ if o.Client != nil {
+ return o.Client
+ }
+ return internal.DefaultClient()
+}
+
+// authCodeURL returns a URL that points to an OAuth2 consent page.
+func (o *Options3LO) authCodeURL(state string, values url.Values) string {
+ var buf bytes.Buffer
+ buf.WriteString(o.AuthURL)
+ v := url.Values{
+ "response_type": {"code"},
+ "client_id": {o.ClientID},
+ }
+ if o.RedirectURL != "" {
+ v.Set("redirect_uri", o.RedirectURL)
+ }
+ if len(o.Scopes) > 0 {
+ v.Set("scope", strings.Join(o.Scopes, " "))
+ }
+ if state != "" {
+ v.Set("state", state)
+ }
+ if o.AuthHandlerOpts != nil {
+ if o.AuthHandlerOpts.PKCEOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts.Challenge != "" {
+ v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge)
+ }
+ if o.AuthHandlerOpts.PKCEOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" {
+ v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod)
+ }
+ }
+ for k := range values {
+ v.Set(k, values.Get(k))
+ }
+ if strings.Contains(o.AuthURL, "?") {
+ buf.WriteByte('&')
+ } else {
+ buf.WriteByte('?')
+ }
+ buf.WriteString(v.Encode())
+ return buf.String()
+}
+
+// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2
+// configuration. The TokenProvider caches and auto-refreshes tokens by
+// default.
+func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) {
+ if err := opts.validate(); err != nil {
+ return nil, err
+ }
+ if opts.AuthHandlerOpts != nil {
+ return new3LOTokenProviderWithAuthHandler(opts), nil
+ }
+ return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenExpiry,
+ }), nil
+}
+
+// AuthorizationHandlerOptions provides a set of options to specify for doing a
+// 3-legged OAuth2 flow with a custom [AuthorizationHandler].
+type AuthorizationHandlerOptions struct {
+ // Handler specifies the [AuthorizationHandler] used for the authorization
+ // part of the flow.
+ Handler AuthorizationHandler
+ // State is used to verify that the "state" is identical in the request and
+ // response before exchanging the auth code for an OAuth2 token.
+ State string
+ // PKCEOpts allows setting configurations for PKCE. Optional.
+ PKCEOpts *PKCEOptions
+}
+
+func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider {
+ return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{
+ ExpireEarly: opts.EarlyTokenExpiry,
+ })
+}
+
+// exchange handles the final exchange portion of the 3lo flow. Returns a Token,
+// refreshToken, and error.
+func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) {
+ // Build request
+ v := url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ }
+ if o.RedirectURL != "" {
+ v.Set("redirect_uri", o.RedirectURL)
+ }
+ if o.AuthHandlerOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts != nil &&
+ o.AuthHandlerOpts.PKCEOpts.Verifier != "" {
+ v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier)
+ }
+ for k := range o.URLParams {
+ v.Set(k, o.URLParams.Get(k))
+ }
+ return fetchToken(ctx, o, v)
+}
+
+// This struct is not safe for concurrent access on its own, but this package
+// always wraps it in a cachedTokenProvider, which makes it so.
+type tokenProvider3LO struct {
+ opts *Options3LO
+ client *http.Client
+ refreshToken string
+}
+
+func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) {
+ if tp.refreshToken == "" {
+ return nil, errors.New("auth: token expired and refresh token is not set")
+ }
+ v := url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {tp.refreshToken},
+ }
+ for k := range tp.opts.URLParams {
+ v.Set(k, tp.opts.URLParams.Get(k))
+ }
+
+ tk, rt, err := fetchToken(ctx, tp.opts, v)
+ if err != nil {
+ return nil, err
+ }
+ if tp.refreshToken != rt && rt != "" {
+ tp.refreshToken = rt
+ }
+ return tk, err
+}
+
+type tokenProviderWithHandler struct {
+ opts *Options3LO
+ state string
+}
+
+func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) {
+ url := tp.opts.authCodeURL(tp.state, nil)
+ code, state, err := tp.opts.AuthHandlerOpts.Handler(url)
+ if err != nil {
+ return nil, err
+ }
+ if state != tp.state {
+ return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow")
+ }
+ tok, _, err := tp.opts.exchange(ctx, code)
+ return tok, err
+}
+
+// fetchToken returns a Token, refresh token, and/or an error.
+func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) {
+ var refreshToken string
+ if o.AuthStyle == StyleInParams {
+ if o.ClientID != "" {
+ v.Set("client_id", o.ClientID)
+ }
+ if o.ClientSecret != "" {
+ v.Set("client_secret", o.ClientSecret)
+ }
+ }
+ req, err := http.NewRequestWithContext(ctx, "POST", o.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, refreshToken, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if o.AuthStyle == StyleInHeader {
+ req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
+ }
+ logger := o.logger()
+
+ logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
+ // Make request
+ resp, body, err := internal.DoRequest(o.client(), req)
+ if err != nil {
+ return nil, refreshToken, err
+ }
+ logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body))
+ failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299
+ tokError := &Error{
+ Response: resp,
+ Body: body,
+ }
+
+ var token *Token
+ // errors ignored because of default switch on content
+ content, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ // some endpoints return a query string
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ if failureStatus {
+ return nil, refreshToken, tokError
+ }
+ return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err)
+ }
+ tokError.code = vals.Get("error")
+ tokError.description = vals.Get("error_description")
+ tokError.uri = vals.Get("error_uri")
+ token = &Token{
+ Value: vals.Get("access_token"),
+ Type: vals.Get("token_type"),
+ Metadata: make(map[string]interface{}, len(vals)),
+ }
+ for k, v := range vals {
+ token.Metadata[k] = v
+ }
+ refreshToken = vals.Get("refresh_token")
+ e := vals.Get("expires_in")
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ if failureStatus {
+ return nil, refreshToken, tokError
+ }
+ return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err)
+ }
+ tokError.code = tj.ErrorCode
+ tokError.description = tj.ErrorDescription
+ tokError.uri = tj.ErrorURI
+ token = &Token{
+ Value: tj.AccessToken,
+ Type: tj.TokenType,
+ Expiry: tj.expiry(),
+ Metadata: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.Metadata) // optional field, skip err check
+ refreshToken = tj.RefreshToken
+ }
+ // according to spec, servers should respond status 400 in error case
+ // https://www.rfc-editor.org/rfc/rfc6749#section-5.2
+ // but some unorthodox servers respond 200 in error case
+ if failureStatus || tokError.code != "" {
+ return nil, refreshToken, tokError
+ }
+ if token.Value == "" {
+ return nil, refreshToken, errors.New("auth: server response missing access_token")
+ }
+ return token, refreshToken, nil
+}
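Note: a minimal wiring sketch for the refresh-token path of New3LOTokenProvider may help review the validation rules above. Endpoints and credentials are placeholders; auth.StyleInParams is one of the exported Style constants consumed by fetchToken.

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/auth"
    )

    func main() {
        tp, err := auth.New3LOTokenProvider(&auth.Options3LO{
            ClientID:     "client-id",     // placeholder
            ClientSecret: "client-secret", // placeholder
            AuthURL:      "https://example.com/auth",
            TokenURL:     "https://example.com/token",
            AuthStyle:    auth.StyleInParams, // send client info as form params
            RefreshToken: "refresh-token",    // required when AuthHandlerOpts is unset
        })
        if err != nil {
            log.Fatal(err)
        }
        // The provider is wrapped in a cache, so tokens are reused and
        // refreshed automatically ahead of expiry.
        tok, err := tp.Token(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        log.Println("token expires:", tok.Expiry)
    }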
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 967e06074..1f848ce0b 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,52 @@
# Changes
+## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.6.0...compute/metadata/v0.7.0) (2025-05-13)
+
+
+### Features
+
+* **compute/metadata:** Allow canceling GCE detection ([#11786](https://github.com/googleapis/google-cloud-go/issues/11786)) ([78100fe](https://github.com/googleapis/google-cloud-go/commit/78100fe7e28cd30f1e10b47191ac3c9839663b64))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13)
+
+
+### Features
+
+* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f))
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10)
+
+
+### Features
+
+* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302))
+
+## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01)
+
+
+### Features
+
+* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3))
+
+
+### Documentation
+
+* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f))
+
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go
new file mode 100644
index 000000000..8ec673b88
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/log.go
@@ -0,0 +1,149 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+)
+
+// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog
+// to avoid the dependency. The compute/metadata module is used by too many
+// non-client library modules that can't justify the dependency.
+
+// The handler returned if logging is not enabled.
+type noOpHandler struct{}
+
+func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
+ return false
+}
+
+func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
+ return nil
+}
+
+func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
+ return h
+}
+
+func (h noOpHandler) WithGroup(_ string) slog.Handler {
+ return h
+}
+
+// httpRequest returns a lazily evaluated [slog.LogValuer] for a
+// [http.Request] and the associated body.
+func httpRequest(req *http.Request, body []byte) slog.LogValuer {
+ return &request{
+ req: req,
+ payload: body,
+ }
+}
+
+type request struct {
+ req *http.Request
+ payload []byte
+}
+
+func (r *request) LogValue() slog.Value {
+ if r == nil || r.req == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method))
+ groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String()))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.req.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+// httpResponse returns a lazily evaluated [slog.LogValuer] for a
+// [http.Response] and the associated body.
+func httpResponse(resp *http.Response, body []byte) slog.LogValuer {
+ return &response{
+ resp: resp,
+ payload: body,
+ }
+}
+
+type response struct {
+ resp *http.Response
+ payload []byte
+}
+
+func (r *response) LogValue() slog.Value {
+ if r == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode)))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.resp.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+func processPayload(payload []byte) (slog.Attr, bool) {
+ peekChar := payload[0]
+ if peekChar == '{' {
+ // JSON object
+ var m map[string]any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else if peekChar == '[' {
+ // JSON array
+ var m []any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else {
+ // Everything else
+ buf := &bytes.Buffer{}
+ if err := json.Compact(buf, payload); err != nil {
+ // Write raw payload in case of error
+ buf.Write(payload)
+ }
+ return slog.String("payload", buf.String()), true
+ }
+ return slog.Attr{}, false
+}
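Note: these wrappers rely on slog's lazily evaluated LogValuer: LogValue only runs when a handler is enabled, so under the noOpHandler above the formatting work is skipped entirely. An illustrative sketch of the same pattern (the reqValue type is invented for this note):

    package main

    import (
        "log/slog"
        "os"
    )

    // reqValue defers all attribute construction until a handler
    // actually emits the record.
    type reqValue struct{ method, url string }

    func (r reqValue) LogValue() slog.Value {
        return slog.GroupValue(
            slog.String("method", r.method),
            slog.String("url", r.url),
        )
    }

    func main() {
        // At the default Info level this Debug call resolves nothing:
        // LogValue is never invoked, mirroring the disabled noOpHandler case.
        logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
        logger.Debug("metadata request", "request", reqValue{"GET", "http://169.254.169.254"})
    }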
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index f67e3c7ee..322be8032 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -24,11 +24,11 @@ import (
"encoding/json"
"fmt"
"io"
+ "log/slog"
"net"
"net/http"
"net/url"
"os"
- "runtime"
"strings"
"sync"
"time"
@@ -61,7 +61,10 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
-var defaultClient = &Client{hc: newDefaultHTTPClient()}
+var defaultClient = &Client{
+ hc: newDefaultHTTPClient(),
+ logger: slog.New(noOpHandler{}),
+}
func newDefaultHTTPClient() *http.Client {
return &http.Client{
@@ -88,16 +91,16 @@ func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
-func (c *cachedValue) get(cl *Client) (v string, err error) {
+func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
- v, err = cl.getTrimmed(context.Background(), c.k)
+ v, err = cl.getTrimmed(ctx, c.k)
} else {
- v, err = cl.GetWithContext(context.Background(), c.k)
+ v, err = cl.GetWithContext(ctx, c.k)
}
if err == nil {
c.v = v
@@ -110,99 +113,27 @@ var (
onGCE bool
)
-// OnGCE reports whether this process is running on Google Compute Engine.
+// OnGCE reports whether this process is running on Google Compute Platforms.
+// NOTE: A true return from `OnGCE` does not guarantee that the metadata server
+// is accessible from this process or that it has all the metadata defined.
func OnGCE() bool {
- onGCEOnce.Do(initOnGCE)
- return onGCE
-}
-
-func initOnGCE() {
- onGCE = testOnGCE()
+ return OnGCEWithContext(context.Background())
}
-func testOnGCE() bool {
- // The user explicitly said they're on GCE, so trust them.
- if os.Getenv(metadataHostEnv) != "" {
- return true
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- resc := make(chan bool, 2)
-
- // Try two strategies in parallel.
- // See https://github.com/googleapis/google-cloud-go/issues/194
- go func() {
- req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
- req.Header.Set("User-Agent", userAgent)
- res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
- if err != nil {
- resc <- false
- return
- }
- defer res.Body.Close()
- resc <- res.Header.Get("Metadata-Flavor") == "Google"
- }()
-
- go func() {
- resolver := &net.Resolver{}
- addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
- if err != nil || len(addrs) == 0 {
- resc <- false
- return
- }
- resc <- strsContains(addrs, metadataIP)
- }()
-
- tryHarder := systemInfoSuggestsGCE()
- if tryHarder {
- res := <-resc
- if res {
- // The first strategy succeeded, so let's use it.
- return true
- }
- // Wait for either the DNS or metadata server probe to
- // contradict the other one and say we are running on
- // GCE. Give it a lot of time to do so, since the system
- // info already suggests we're running on a GCE BIOS.
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
- select {
- case res = <-resc:
- return res
- case <-timer.C:
- // Too slow. Who knows what this system is.
- return false
- }
- }
-
- // There's no hint from the system info that we're running on
- // GCE, so use the first probe's result as truth, whether it's
- // true or false. The goal here is to optimize for speed for
- // users who are NOT running on GCE. We can't assume that
- // either a DNS lookup or an HTTP request to a blackholed IP
- // address is fast. Worst case this should return when the
- // metaClient's Transport.ResponseHeaderTimeout or
- // Transport.Dial.Timeout fires (in two seconds).
- return <-resc
-}
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
- if runtime.GOOS != "linux" {
- // We don't have any non-Linux clues available, at least yet.
- return false
- }
- slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
- name := strings.TrimSpace(string(slurp))
- return name == "Google" || name == "Google Compute Engine"
+// OnGCEWithContext reports whether this process is running on Google Compute Platforms.
+// This function's return value is memoized for better performance.
+// NOTE: A true return from `OnGCEWithContext` does not guarantee that the metadata
+// server is accessible from this process or that it has all the metadata defined.
+func OnGCEWithContext(ctx context.Context) bool {
+ onGCEOnce.Do(func() {
+ onGCE = defaultClient.OnGCEWithContext(ctx)
+ })
+ return onGCE
}
// Subscribe calls Client.SubscribeWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [SubscribeWithContext].
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
}
@@ -225,55 +156,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) {
}
// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return defaultClient.ProjectID() }
+//
+// Deprecated: Please use the context aware variant [ProjectIDWithContext].
+func ProjectID() (string, error) {
+ return defaultClient.ProjectIDWithContext(context.Background())
+}
+
+// ProjectIDWithContext returns the current instance's project ID string.
+func ProjectIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ProjectIDWithContext(ctx)
+}
// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
+//
+// Deprecated: Please use the context aware variant [NumericProjectIDWithContext].
+func NumericProjectID() (string, error) {
+ return defaultClient.NumericProjectIDWithContext(context.Background())
+}
+
+// NumericProjectIDWithContext returns the current instance's numeric project ID.
+func NumericProjectIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.NumericProjectIDWithContext(ctx)
+}
// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) { return defaultClient.InternalIP() }
+//
+// Deprecated: Please use the context aware variant [InternalIPWithContext].
+func InternalIP() (string, error) {
+ return defaultClient.InternalIPWithContext(context.Background())
+}
+
+// InternalIPWithContext returns the instance's primary internal IP address.
+func InternalIPWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InternalIPWithContext(ctx)
+}
// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
+//
+// Deprecated: Please use the context aware variant [ExternalIPWithContext].
+func ExternalIP() (string, error) {
+ return defaultClient.ExternalIPWithContext(context.Background())
+}
+
+// ExternalIPWithContext returns the instance's primary external (public) IP address.
+func ExternalIPWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ExternalIPWithContext(ctx)
+}
+
+// Email calls Client.EmailWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [EmailWithContext].
+func Email(serviceAccount string) (string, error) {
+ return defaultClient.EmailWithContext(context.Background(), serviceAccount)
+}
-// Email calls Client.Email on the default client.
-func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
+// EmailWithContext calls Client.EmailWithContext on the default client.
+func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
+ return defaultClient.EmailWithContext(ctx, serviceAccount)
+}
// Hostname returns the instance's hostname. This will be of the form
// ".c..internal".
-func Hostname() (string, error) { return defaultClient.Hostname() }
+//
+// Deprecated: Please use the context aware variant [HostnameWithContext].
+func Hostname() (string, error) {
+ return defaultClient.HostnameWithContext(context.Background())
+}
+
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// ".c..internal".
+func HostnameWithContext(ctx context.Context) (string, error) {
+ return defaultClient.HostnameWithContext(ctx)
+}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
+//
+// Deprecated: Please use the context aware variant [InstanceTagsWithContext].
+func InstanceTags() ([]string, error) {
+ return defaultClient.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTagsWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.InstanceTagsWithContext(ctx)
+}
// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) { return defaultClient.InstanceID() }
+//
+// Deprecated: Please use the context aware variant [InstanceIDWithContext].
+func InstanceID() (string, error) {
+ return defaultClient.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func InstanceIDWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InstanceIDWithContext(ctx)
+}
// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() }
+//
+// Deprecated: Please use the context aware variant [InstanceNameWithContext].
+func InstanceName() (string, error) {
+ return defaultClient.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func InstanceNameWithContext(ctx context.Context) (string, error) {
+ return defaultClient.InstanceNameWithContext(ctx)
+}
// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) { return defaultClient.Zone() }
+//
+// Deprecated: Please use the context aware variant [ZoneWithContext].
+func Zone() (string, error) {
+ return defaultClient.ZoneWithContext(context.Background())
+}
-// InstanceAttributes calls Client.InstanceAttributes on the default client.
-func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func ZoneWithContext(ctx context.Context) (string, error) {
+ return defaultClient.ZoneWithContext(ctx)
+}
-// ProjectAttributes calls Client.ProjectAttributes on the default client.
-func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
+// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [InstanceAttributesWithContext].
+func InstanceAttributes() ([]string, error) {
+ return defaultClient.InstanceAttributesWithContext(context.Background())
+}
-// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
+// InstanceAttributesWithContext calls Client.InstanceAttributesWithContext on the default client.
+func InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.InstanceAttributesWithContext(ctx)
+}
+
+// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ProjectAttributesWithContext].
+func ProjectAttributes() ([]string, error) {
+ return defaultClient.ProjectAttributesWithContext(context.Background())
+}
+
+// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client.
+func ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
+ return defaultClient.ProjectAttributesWithContext(ctx)
+}
+
+// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext].
func InstanceAttributeValue(attr string) (string, error) {
- return defaultClient.InstanceAttributeValue(attr)
+ return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr)
}
-// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
+// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client.
+func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return defaultClient.InstanceAttributeValueWithContext(ctx, attr)
+}
+
+// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext].
func ProjectAttributeValue(attr string) (string, error) {
- return defaultClient.ProjectAttributeValue(attr)
+ return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr)
+}
+
+// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client.
+func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return defaultClient.ProjectAttributeValueWithContext(ctx, attr)
}
-// Scopes calls Client.Scopes on the default client.
-func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
+// Scopes calls Client.ScopesWithContext on the default client.
+//
+// Deprecated: Please use the context aware variant [ScopesWithContext].
+func Scopes(serviceAccount string) ([]string, error) {
+ return defaultClient.ScopesWithContext(context.Background(), serviceAccount)
+}
+
+// ScopesWithContext calls Client.ScopesWithContext on the default client.
+func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) {
+ return defaultClient.ScopesWithContext(ctx, serviceAccount)
+}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
@@ -286,18 +350,120 @@ func strsContains(ss []string, s string) bool {
// A Client provides metadata.
type Client struct {
- hc *http.Client
+ hc *http.Client
+ logger *slog.Logger
+}
+
+// Options for configuring a [Client].
+type Options struct {
+ // Client is the HTTP client used to make requests. Optional.
+ Client *http.Client
+ // Logger is used to log information about HTTP request and responses.
+ // If not provided, nothing will be logged. Optional.
+ Logger *slog.Logger
}
// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
- if c == nil {
+ return NewWithOptions(&Options{
+ Client: c,
+ })
+}
+
+// NewWithOptions returns a Client that is configured with the provided Options.
+func NewWithOptions(opts *Options) *Client {
+ if opts == nil {
return defaultClient
}
+ client := opts.Client
+ if client == nil {
+ client = newDefaultHTTPClient()
+ }
+ logger := opts.Logger
+ if logger == nil {
+ logger = slog.New(noOpHandler{})
+ }
+ return &Client{hc: client, logger: logger}
+}
- return &Client{hc: c}
+// NOTE: metadataRequestStrategy is assigned to a variable for test stubbing purposes.
+var metadataRequestStrategy = func(ctx context.Context, httpClient *http.Client, resc chan bool) {
+ req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+ req.Header.Set("User-Agent", userAgent)
+ res, err := httpClient.Do(req.WithContext(ctx))
+ if err != nil {
+ resc <- false
+ return
+ }
+ defer res.Body.Close()
+ resc <- res.Header.Get("Metadata-Flavor") == "Google"
+}
+
+// NOTE: dnsRequestStrategy is assigned to a variable for test stubbing purposes.
+var dnsRequestStrategy = func(ctx context.Context, resc chan bool) {
+ resolver := &net.Resolver{}
+ addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
+ if err != nil || len(addrs) == 0 {
+ resc <- false
+ return
+ }
+ resc <- strsContains(addrs, metadataIP)
+}
+
+// OnGCEWithContext reports whether this process is running on Google Compute Platforms.
+// NOTE: A true return from `OnGCEWithContext` does not guarantee that the metadata
+// server is accessible from this process or that it has all the metadata defined.
+func (c *Client) OnGCEWithContext(ctx context.Context) bool {
+ // The user explicitly said they're on GCE, so trust them.
+ if os.Getenv(metadataHostEnv) != "" {
+ return true
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ resc := make(chan bool, 2)
+
+ // Try two strategies in parallel.
+ // See https://github.com/googleapis/google-cloud-go/issues/194
+ go metadataRequestStrategy(ctx, c.hc, resc)
+ go dnsRequestStrategy(ctx, resc)
+
+ tryHarder := systemInfoSuggestsGCE()
+ if tryHarder {
+ res := <-resc
+ if res {
+ // The first strategy succeeded, so let's use it.
+ return true
+ }
+
+ // Wait for either the DNS or metadata server probe to
+ // contradict the other one and say we are running on
+ // GCE. Give it a lot of time to do so, since the system
+ // info already suggests we're running on a GCE BIOS.
+ // Ensure cancellations from the calling context are respected.
+ waitContext, cancelWait := context.WithTimeout(ctx, 5*time.Second)
+ defer cancelWait()
+ select {
+ case res = <-resc:
+ return res
+ case <-waitContext.Done():
+ // Too slow. Who knows what this system is.
+ return false
+ }
+ }
+
+ // There's no hint from the system info that we're running on
+ // GCE, so use the first probe's result as truth, whether it's
+ // true or false. The goal here is to optimize for speed for
+ // users who are NOT running on GCE. We can't assume that
+ // either a DNS lookup or an HTTP request to a blackholed IP
+ // address is fast. Worst case this should return when the
+ // metaClient's Transport.ResponseHeaderTimeout or
+ // Transport.Dial.Timeout fires (in two seconds).
+ return <-resc
}
// getETag returns a value from the metadata service as well as the associated ETag.
@@ -327,14 +493,26 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
req.Header.Set("User-Agent", userAgent)
var res *http.Response
var reqErr error
+ var body []byte
retryer := newRetryer()
for {
+ c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil))
res, reqErr = c.hc.Do(req)
var code int
if res != nil {
code = res.StatusCode
+ body, err = io.ReadAll(res.Body)
+ if err != nil {
+ res.Body.Close()
+ return "", "", err
+ }
+ c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body))
+ res.Body.Close()
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
if err := sleep(ctx, delay); err != nil {
return "", "", err
}
@@ -345,18 +523,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
if reqErr != nil {
return "", "", reqErr
}
- defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- all, err := io.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
if res.StatusCode != 200 {
- return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+ return "", "", &Error{Code: res.StatusCode, Message: string(body)}
}
- return string(all), res.Header.Get("Etag"), nil
+ return string(body), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
@@ -381,6 +554,10 @@ func (c *Client) Get(suffix string) (string, error) {
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
+//
+// NOTE: Without an extra deadline on the context, this call can take up to
+// 15 seconds in the worst case with internal backoff retries (e.g. when the
+// server is responding slowly). Pass a context with an additional timeout
+// when needed.
func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
val, _, err := c.getETag(ctx, suffix)
return val, err
@@ -392,8 +569,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e
return
}
-func (c *Client) lines(suffix string) ([]string, error) {
- j, err := c.GetWithContext(context.Background(), suffix)
+func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) {
+ j, err := c.GetWithContext(ctx, suffix)
if err != nil {
return nil, err
}
@@ -405,45 +582,104 @@ func (c *Client) lines(suffix string) ([]string, error) {
}
// ProjectID returns the current instance's project ID string.
-func (c *Client) ProjectID() (string, error) { return projID.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext].
+func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) }
+
+// ProjectIDWithContext returns the current instance's project ID string.
+func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) }
// NumericProjectID returns the current instance's numeric project ID.
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext].
+func (c *Client) NumericProjectID() (string, error) {
+ return c.NumericProjectIDWithContext(context.Background())
+}
+
+// NumericProjectIDWithContext returns the current instance's numeric project ID.
+func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) {
+ return projNum.get(ctx, c)
+}
// InstanceID returns the current VM's numeric instance ID.
-func (c *Client) InstanceID() (string, error) { return instID.get(c) }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext].
+func (c *Client) InstanceID() (string, error) {
+ return c.InstanceIDWithContext(context.Background())
+}
+
+// InstanceIDWithContext returns the current VM's numeric instance ID.
+func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) {
+ return instID.get(ctx, c)
+}
// InternalIP returns the instance's primary internal IP address.
+//
+// Deprecated: Please use the context aware variant [Client.InternalIPWithContext].
func (c *Client) InternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
+ return c.InternalIPWithContext(context.Background())
+}
+
+// InternalIPWithContext returns the instance's primary internal IP address.
+func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
+//
+// Deprecated: Please use the context aware variant [Client.EmailWithContext].
func (c *Client) Email(serviceAccount string) (string, error) {
+ return c.EmailWithContext(context.Background(), serviceAccount)
+}
+
+// EmailWithContext returns the email address associated with the service account.
+// Passing an empty string or "default" as serviceAccount uses the
+// instance's main account.
+func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
+ return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
+//
+// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext].
func (c *Client) ExternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
+ return c.ExternalIPWithContext(context.Background())
+}
+
+// ExternalIPWithContext returns the instance's primary external (public) IP address.
+func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// ".c..internal".
+//
+// Deprecated: Please use the context aware variant [Client.HostnameWithContext].
func (c *Client) Hostname() (string, error) {
- return c.getTrimmed(context.Background(), "instance/hostname")
+ return c.HostnameWithContext(context.Background())
}
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
+// HostnameWithContext returns the instance's hostname. This will be of the form
+// ".c..internal".
+func (c *Client) HostnameWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext].
func (c *Client) InstanceTags() ([]string, error) {
+ return c.InstanceTagsWithContext(context.Background())
+}
+
+// InstanceTagsWithContext returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) {
var s []string
- j, err := c.GetWithContext(context.Background(), "instance/tags")
+ j, err := c.GetWithContext(ctx, "instance/tags")
if err != nil {
return nil, err
}
@@ -454,13 +690,27 @@ func (c *Client) InstanceTags() ([]string, error) {
}
// InstanceName returns the current VM's instance ID string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext].
func (c *Client) InstanceName() (string, error) {
- return c.getTrimmed(context.Background(), "instance/name")
+ return c.InstanceNameWithContext(context.Background())
+}
+
+// InstanceNameWithContext returns the current VM's instance ID string.
+func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) {
+ return c.getTrimmed(ctx, "instance/name")
}
// Zone returns the current VM's zone, such as "us-central1-b".
+//
+// Deprecated: Please use the context aware variant [Client.ZoneWithContext].
func (c *Client) Zone() (string, error) {
- zone, err := c.getTrimmed(context.Background(), "instance/zone")
+ return c.ZoneWithContext(context.Background())
+}
+
+// ZoneWithContext returns the current VM's zone, such as "us-central1-b".
+func (c *Client) ZoneWithContext(ctx context.Context) (string, error) {
+ zone, err := c.getTrimmed(ctx, "instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
@@ -471,12 +721,34 @@ func (c *Client) Zone() (string, error) {
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext].
+func (c *Client) InstanceAttributes() ([]string, error) {
+ return c.InstanceAttributesWithContext(context.Background())
+}
+
+// InstanceAttributesWithContext returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) {
+ return c.lines(ctx, "instance/attributes/")
+}
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
-func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
+//
+// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext].
+func (c *Client) ProjectAttributes() ([]string, error) {
+ return c.ProjectAttributesWithContext(context.Background())
+}
+
+// ProjectAttributesWithContext returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) {
+ return c.lines(ctx, "project/attributes/")
+}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
@@ -486,8 +758,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
+//
+// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext].
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
+ return c.InstanceAttributeValueWithContext(context.Background(), attr)
+}
+
+// InstanceAttributeValueWithContext returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValueWithContext may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return c.GetWithContext(ctx, "instance/attributes/"+attr)
}
// ProjectAttributeValue returns the value of the provided
@@ -498,18 +784,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) {
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
+//
+// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext].
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "project/attributes/"+attr)
+ return c.ProjectAttributeValueWithContext(context.Background(), attr)
+}
+
+// ProjectAttributeValueWithContext returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValueWithContext may return ("", nil) if the attribute was
+// defined to be the empty string.
+func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) {
+ return c.GetWithContext(ctx, "project/attributes/"+attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
+//
+// Deprecated: Please use the context aware variant [Client.ScopesWithContext].
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
+ return c.ScopesWithContext(context.Background(), serviceAccount)
+}
+
+// ScopesWithContext returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
- return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
+ return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes")
}
// Subscribe subscribes to a value from the metadata service.
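Note: a hedged end-to-end sketch of the new surface, combining NewWithOptions, OnGCEWithContext, and a context-aware getter (assumes a GCE-like environment, or GCE_METADATA_HOST being set):

    package main

    import (
        "context"
        "log"
        "log/slog"
        "os"
        "time"

        "cloud.google.com/go/compute/metadata"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        // Enable the package's new debug logging; without a Logger,
        // nothing is logged.
        c := metadata.NewWithOptions(&metadata.Options{
            Logger: slog.New(slog.NewTextHandler(os.Stderr,
                &slog.HandlerOptions{Level: slog.LevelDebug})),
        })
        if !c.OnGCEWithContext(ctx) { // detection now honors cancellation
            log.Fatal("not running on a Google Compute platform")
        }
        id, err := c.ProjectIDWithContext(ctx)
        if err != nil {
            log.Fatal(err)
        }
        log.Println("project:", id)
    }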
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
index bb412f891..2e53f0123 100644
--- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
+++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
@@ -17,10 +17,15 @@
package metadata
-import "syscall"
+import (
+ "errors"
+ "syscall"
+)
func init() {
// Initialize syscallRetryable to return true on transient socket-level
// errors. These errors are specific to Linux.
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
+ syscallRetryable = func(err error) bool {
+ return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED)
+ }
}
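Note: the move to errors.Is matters because the errno rarely surfaces bare; it usually arrives wrapped, e.g. via *net.OpError and *os.SyscallError. A small illustration:

    package main

    import (
        "errors"
        "fmt"
        "os"
        "syscall"
    )

    func main() {
        // A typical wrapped chain: fmt.Errorf %w -> *os.SyscallError -> errno.
        err := fmt.Errorf("dial tcp: %w",
            os.NewSyscallError("connect", syscall.ECONNREFUSED))

        fmt.Println(err == syscall.ECONNREFUSED)          // false: direct comparison misses it
        fmt.Println(errors.Is(err, syscall.ECONNREFUSED)) // true: unwraps the chain
    }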
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go
new file mode 100644
index 000000000..d57ae1b27
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go
@@ -0,0 +1,28 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !linux
+
+package metadata
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+//
+// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ // We don't currently have checks for other GOOS
+ return false
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
new file mode 100644
index 000000000..17ba5a3a2
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
@@ -0,0 +1,30 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package metadata
+
+import (
+ "os"
+ "strings"
+)
+
+// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ b, _ := os.ReadFile("/sys/class/dmi/id/product_name")
+
+ name := strings.TrimSpace(string(b))
+ return name == "Google" || name == "Google Compute Engine"
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
new file mode 100644
index 000000000..f57a5b14e
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
@@ -0,0 +1,39 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package metadata
+
+import (
+ "strings"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE)
+ if err != nil {
+ return false
+ }
+ defer k.Close()
+
+ s, _, err := k.GetStringValue("SystemProductName")
+ if err != nil {
+ return false
+ }
+ s = strings.TrimSpace(s)
+ return strings.HasPrefix(s, "Google")
+}
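
All three new files carry the same NOTE: `systemInfoSuggestsGCE` is a package-level `var` rather than a plain function precisely so internal tests can swap it out. A sketch of that stubbing pattern (illustrative only, not a test shipped in this diff):

```go
package metadata

import "testing"

func TestSystemHintStubbed(t *testing.T) {
	// Save and restore the real check so other tests see the original.
	orig := systemInfoSuggestsGCE
	defer func() { systemInfoSuggestsGCE = orig }()

	// Pretend the DMI/registry probe said "Google".
	systemInfoSuggestsGCE = func() bool { return true }

	if !systemInfoSuggestsGCE() {
		t.Fatal("stub was not installed")
	}
}
```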
diff --git a/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/vendor/github.com/Azure/go-ansiterm/SECURITY.md
new file mode 100644
index 000000000..e138ec5d6
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/SECURITY.md
@@ -0,0 +1,41 @@
+
+
+## Security
+
+Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
+
+If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
+
+If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
+
+You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+ * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+ * Full paths of source file(s) related to the manifestation of the issue
+ * The location of the affected source code (tag/branch/commit or direct URL)
+ * Any special configuration required to reproduce the issue
+ * Step-by-step instructions to reproduce the issue
+ * Proof-of-concept or exploit code (if possible)
+ * Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
+
+## Policy
+
+Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
+
+
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
index b11eb0788..97434ea7f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -160,7 +160,7 @@ if (err == nil) {
```Go
certificatePath := "./example-app.pfx"
-certData, err := ioutil.ReadFile(certificatePath)
+certData, err := os.ReadFile(certificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
index 9daa4b58b..f040e2ac6 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
@@ -27,7 +27,7 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"strings"
@@ -116,7 +116,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf
}
s := v.Encode()
- body := ioutil.NopCloser(strings.NewReader(s))
+ body := io.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
if err != nil {
@@ -131,7 +131,7 @@ func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConf
}
defer resp.Body.Close()
- rb, err := ioutil.ReadAll(resp.Body)
+ rb, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
}
@@ -175,7 +175,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code
}
s := v.Encode()
- body := ioutil.NopCloser(strings.NewReader(s))
+ body := io.NopCloser(strings.NewReader(s))
req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
if err != nil {
@@ -190,7 +190,7 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code
}
defer resp.Body.Close()
- rb, err := ioutil.ReadAll(resp.Body)
+ rb, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
}
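
Most of the go-autorest hunks from here on are the same mechanical migration: `io/ioutil` has been deprecated since Go 1.16, and each helper is now a thin alias living in `io` or `os`. A compact sketch of the one-to-one mapping these diffs apply:

```go
package main

import (
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.NopCloser -> io.NopCloser; ioutil.ReadAll -> io.ReadAll
	rc := io.NopCloser(strings.NewReader("payload"))
	b, _ := io.ReadAll(rc)

	// ioutil.Discard -> io.Discard
	io.Copy(io.Discard, strings.NewReader(string(b)))

	// ioutil.TempFile -> os.CreateTemp; ioutil.ReadFile -> os.ReadFile
	f, _ := os.CreateTemp("", "token")
	f.Close()
	defer os.Remove(f.Name())
	os.WriteFile(f.Name(), b, 0o600)
	data, _ := os.ReadFile(f.Name())
	_ = data
}
```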
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
index 2a974a39b..fb54a4323 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
@@ -20,7 +20,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -62,7 +61,7 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
}
- newFile, err := ioutil.TempFile(dir, "token")
+ newFile, err := os.CreateTemp(dir, "token")
if err != nil {
return fmt.Errorf("failed to create the temp file to write the token: %v", err)
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index 2a24ab80c..67baecd83 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"math"
"net/http"
"net/url"
@@ -1061,7 +1060,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
} else if msiSecret.clientResourceID != "" {
data.Set("msi_res_id", msiSecret.clientResourceID)
}
- req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
+ req.Body = io.NopCloser(strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
break
case msiTypeIMDS:
@@ -1096,7 +1095,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
s := v.Encode()
- body := ioutil.NopCloser(strings.NewReader(s))
+ body := io.NopCloser(strings.NewReader(s))
req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost)
req.Body = body
@@ -1113,7 +1112,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
defer resp.Body.Close()
- rb, err := ioutil.ReadAll(resp.Body)
+ rb, err := io.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
if err != nil {
@@ -1235,7 +1234,7 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
for attempt < maxAttempts {
if resp != nil && resp.Body != nil {
- io.Copy(ioutil.Discard, resp.Body)
+ io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
resp, err = sender.Do(req)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
index 2af5030a1..c58d7b7b8 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
@@ -190,7 +190,7 @@ func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType)
}
}
- return string(cr.Bytes()), nil
+ return cr.String(), nil
}
func getCanonicalizedAccountName(accountName string) string {
@@ -289,7 +289,7 @@ func buildCanonicalizedHeader(headers http.Header) string {
ch.WriteRune('\n')
}
- return strings.TrimSuffix(string(ch.Bytes()), "\n")
+ return strings.TrimSuffix(ch.String(), "\n")
}
func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string {
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
index 45575eedb..f119b11d4 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -19,7 +19,7 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
"strings"
@@ -318,11 +318,11 @@ func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
if err == nil && resp.Body != nil {
// copy the body and close it so callers don't have to
defer resp.Body.Close()
- b, err := ioutil.ReadAll(resp.Body)
+ b, err := io.ReadAll(resp.Body)
if err != nil {
return resp, err
}
- resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ resp.Body = io.NopCloser(bytes.NewReader(b))
}
return resp, err
}
@@ -459,12 +459,12 @@ func (pt *pollingTrackerBase) updateRawBody() error {
pt.rawBody = map[string]interface{}{}
if pt.resp.ContentLength != 0 {
defer pt.resp.Body.Close()
- b, err := ioutil.ReadAll(pt.resp.Body)
+ b, err := io.ReadAll(pt.resp.Body)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
}
// put the body back so it's available to other callers
- pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ pt.resp.Body = io.NopCloser(bytes.NewReader(b))
// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
if len(b) == 0 {
return nil
@@ -516,11 +516,11 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{}
defer pt.resp.Body.Close()
var b []byte
- if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
+ if b, err = io.ReadAll(pt.resp.Body); err != nil {
goto Default
}
// put the body back so it's available to other callers
- pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ pt.resp.Body = io.NopCloser(bytes.NewReader(b))
if len(b) == 0 {
goto Default
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
index 868345db6..09c870809 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -20,7 +20,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"regexp"
"strconv"
@@ -333,7 +333,7 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
// Copy and replace the Body in case it does not contain an error object.
// This will leave the Body available to the caller.
b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e)
- resp.Body = ioutil.NopCloser(&b)
+ resp.Body = io.NopCloser(&b)
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr)
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
index b0a53769f..4684291a8 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -17,7 +17,6 @@ package azure
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"strings"
)
@@ -315,7 +314,7 @@ func EnvironmentFromName(name string) (Environment, error) {
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
// endpoints.
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
- fileContents, err := ioutil.ReadFile(location)
+ fileContents, err := os.ReadFile(location)
if err != nil {
return
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
index 507f9e95c..f436a4512 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
@@ -3,7 +3,7 @@ package azure
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"strings"
@@ -236,7 +236,7 @@ func retrieveMetadataEnvironment(endpoint string) (environment environmentMetada
return environment, err
}
defer response.Body.Close()
- jsonResponse, err := ioutil.ReadAll(response.Body)
+ jsonResponse, err := io.ReadAll(response.Body)
if err != nil {
return environment, err
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
index bb5f9396e..b2f2357e7 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/client.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"log"
"net/http"
"strings"
@@ -106,14 +105,14 @@ func (li LoggingInspector) WithInspection() PrepareDecorator {
defer r.Body.Close()
- r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+ r.Body = io.NopCloser(io.TeeReader(r.Body, &body))
if err := r.Write(&b); err != nil {
return nil, fmt.Errorf("Failed to write request: %v", err)
}
li.Logger.Printf(requestFormat, b.String())
- r.Body = ioutil.NopCloser(&body)
+ r.Body = io.NopCloser(&body)
return p.Prepare(r)
})
}
@@ -129,14 +128,14 @@ func (li LoggingInspector) ByInspecting() RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
var body, b bytes.Buffer
defer resp.Body.Close()
- resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+ resp.Body = io.NopCloser(io.TeeReader(resp.Body, &body))
if err := resp.Write(&b); err != nil {
return fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(responseFormat, b.String())
- resp.Body = ioutil.NopCloser(&body)
+ resp.Body = io.NopCloser(&body)
return r.Respond(resp)
})
}
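
Both inspectors above rely on the same idiom: `io.TeeReader` mirrors every read into a buffer, and `io.NopCloser` puts a readable body back afterwards, so logging consumes the stream without starving the next handler. A standalone sketch of the pattern:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// captureBody logs a body while leaving it readable for downstream code,
// mirroring the TeeReader/NopCloser idiom in the LoggingInspector hunks.
func captureBody(resp *http.Response) string {
	var buf bytes.Buffer
	tee := io.TeeReader(resp.Body, &buf) // every read is copied into buf
	b, _ := io.ReadAll(tee)
	resp.Body = io.NopCloser(&buf) // restore a fresh, readable body
	return string(b)
}

func main() {
	resp := &http.Response{Body: io.NopCloser(strings.NewReader("hello"))}
	fmt.Println(captureBody(resp)) // hello

	again, _ := io.ReadAll(resp.Body)
	fmt.Println(string(again)) // hello: the body survived the capture
}
```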
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
index 4e0543207..c879c200a 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
@@ -1,3 +1,4 @@
+//go:build modhack
// +build modhack
package date
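
This one-line hunk (repeated below for the logger and tracing packages) adds the `//go:build` constraint introduced in Go 1.17 above the legacy `// +build` line; the two must express the same constraint, and `gofmt` keeps the pair in sync. For a richer tag, the dual form looks like this (hypothetical constraint, not from this diff):

```go
//go:build linux && !appengine
// +build linux,!appengine

// The new syntax uses boolean operators; the legacy line encodes AND as a
// comma. gofmt rewrites the legacy line whenever the //go:build line changes.
package example
```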
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
index 121a66fa8..f6de8c5e9 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -21,7 +21,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"mime/multipart"
"net/http"
"net/url"
@@ -268,7 +267,7 @@ func WithBytes(input *[]byte) PrepareDecorator {
}
r.ContentLength = int64(len(*input))
- r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+ r.Body = io.NopCloser(bytes.NewReader(*input))
}
return r, err
})
@@ -296,7 +295,7 @@ func WithFormData(v url.Values) PrepareDecorator {
setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
r.ContentLength = int64(len(s))
- r.Body = ioutil.NopCloser(strings.NewReader(s))
+ r.Body = io.NopCloser(strings.NewReader(s))
}
return r, err
})
@@ -331,7 +330,7 @@ func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDec
return r, err
}
setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
- r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+ r.Body = io.NopCloser(bytes.NewReader(body.Bytes()))
r.ContentLength = int64(body.Len())
return r, err
}
@@ -346,11 +345,11 @@ func WithFile(f io.ReadCloser) PrepareDecorator {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
- b, err := ioutil.ReadAll(f)
+ b, err := io.ReadAll(f)
if err != nil {
return r, err
}
- r.Body = ioutil.NopCloser(bytes.NewReader(b))
+ r.Body = io.NopCloser(bytes.NewReader(b))
r.ContentLength = int64(len(b))
}
return r, err
@@ -396,7 +395,7 @@ func WithString(v string) PrepareDecorator {
r, err := p.Prepare(r)
if err == nil {
r.ContentLength = int64(len(v))
- r.Body = ioutil.NopCloser(strings.NewReader(v))
+ r.Body = io.NopCloser(strings.NewReader(v))
}
return r, err
})
@@ -413,7 +412,7 @@ func WithJSON(v interface{}) PrepareDecorator {
b, err := json.Marshal(v)
if err == nil {
r.ContentLength = int64(len(b))
- r.Body = ioutil.NopCloser(bytes.NewReader(b))
+ r.Body = io.NopCloser(bytes.NewReader(b))
}
}
return r, err
@@ -436,7 +435,7 @@ func WithXML(v interface{}) PrepareDecorator {
r.ContentLength = int64(len(bytesWithHeader))
setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader)))
- r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+ r.Body = io.NopCloser(bytes.NewReader(bytesWithHeader))
}
}
return r, err
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
index 349e1963a..69d4b2b67 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/responder.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -20,7 +20,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"net/http"
"strings"
)
@@ -111,7 +110,7 @@ func ByDiscardingBody() RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && resp != nil && resp.Body != nil {
- if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+ if _, err := io.Copy(io.Discard, resp.Body); err != nil {
return fmt.Errorf("Error discarding the response body: %v", err)
}
}
@@ -160,7 +159,7 @@ func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
- bytes, errInner := ioutil.ReadAll(resp.Body)
+ bytes, errInner := io.ReadAll(resp.Body)
if errInner != nil {
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
} else {
@@ -179,7 +178,7 @@ func ByUnmarshallingJSON(v interface{}) RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
- b, errInner := ioutil.ReadAll(resp.Body)
+ b, errInner := io.ReadAll(resp.Body)
// Some responses might include a BOM, remove for successful unmarshalling
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
if errInner != nil {
@@ -203,7 +202,7 @@ func ByUnmarshallingXML(v interface{}) RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
- b, errInner := ioutil.ReadAll(resp.Body)
+ b, errInner := io.ReadAll(resp.Body)
if errInner != nil {
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
} else {
@@ -232,9 +231,9 @@ func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
resp.Status)
if resp.Body != nil {
defer resp.Body.Close()
- b, _ := ioutil.ReadAll(resp.Body)
+ b, _ := io.ReadAll(resp.Body)
derr.ServiceError = b
- resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ resp.Body = io.NopCloser(bytes.NewReader(b))
}
err = derr
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
index fa11dbed7..7634b0f57 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -17,7 +17,6 @@ package autorest
import (
"bytes"
"io"
- "io/ioutil"
"net/http"
)
@@ -41,12 +40,12 @@ func (rr *RetriableRequest) prepareFromByteReader() (err error) {
return err
}
} else {
- b, err = ioutil.ReadAll(rr.req.Body)
+ b, err = io.ReadAll(rr.req.Body)
if err != nil {
return err
}
}
rr.br = bytes.NewReader(b)
- rr.req.Body = ioutil.NopCloser(rr.br)
+ rr.req.Body = io.NopCloser(rr.br)
return err
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
index 4c87030e8..8340bda40 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -19,7 +19,7 @@ package autorest
import (
"bytes"
- "io/ioutil"
+ "io"
"net/http"
)
@@ -33,10 +33,10 @@ type RetriableRequest struct {
func (rr *RetriableRequest) Prepare() (err error) {
// preserve the request body; this is to support retry logic as
 // the underlying transport will always close the request body
- if rr.req.Body != nil {
+ if rr.req.Body != nil && rr.req.Body != http.NoBody {
if rr.br != nil {
_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
- rr.req.Body = ioutil.NopCloser(rr.br)
+ rr.req.Body = io.NopCloser(rr.br)
}
if err != nil {
return err
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
index 05847c08b..e36d4b046 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -20,7 +20,6 @@ package autorest
import (
"bytes"
"io"
- "io/ioutil"
"net/http"
)
@@ -35,12 +34,12 @@ type RetriableRequest struct {
func (rr *RetriableRequest) Prepare() (err error) {
// preserve the request body; this is to support retry logic as
 // the underlying transport will always close the request body
- if rr.req.Body != nil {
+ if rr.req.Body != nil && rr.req.Body != http.NoBody {
if rr.rc != nil {
rr.req.Body = rr.rc
} else if rr.br != nil {
_, err = rr.br.Seek(0, io.SeekStart)
- rr.req.Body = ioutil.NopCloser(rr.br)
+ rr.req.Body = io.NopCloser(rr.br)
}
if err != nil {
return err
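
The new `rr.req.Body != http.NoBody` guard in both `Prepare` variants skips the buffering path for `http.NoBody`, the non-nil sentinel that reads zero bytes; there is nothing to preserve for replay, and re-wrapping it would discard the sentinel. A sketch of how the sentinel behaves:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// http.NoBody is non-nil but reads zero bytes, so the old `Body != nil`
	// check sent it through the buffering path for nothing.
	req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody)
	fmt.Println(req.Body != nil, req.Body == http.NoBody) // true true

	n, _ := io.Copy(io.Discard, req.Body)
	fmt.Println(n) // 0

	// A real body still takes the preserve-and-replay path.
	req2, _ := http.NewRequest(http.MethodPost, "http://example.com", strings.NewReader("x"))
	fmt.Println(req2.Body == http.NoBody) // false
}
```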
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
index d35b3850a..8c5eb5dbe 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -20,7 +20,6 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"net/url"
@@ -217,7 +216,7 @@ func IsTemporaryNetworkError(err error) bool {
// DrainResponseBody reads the response body then closes it.
func DrainResponseBody(resp *http.Response) error {
if resp != nil && resp.Body != nil {
- _, err := io.Copy(ioutil.Discard, resp.Body)
+ _, err := io.Copy(io.Discard, resp.Body)
resp.Body.Close()
return err
}
diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go
index 0aa27680d..be6aa9ee8 100644
--- a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go
@@ -1,3 +1,4 @@
+//go:build modhack
// +build modhack
package logger
diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go
index 2f5d8cc1a..e70dc3dc2 100644
--- a/vendor/github.com/Azure/go-autorest/logger/logger.go
+++ b/vendor/github.com/Azure/go-autorest/logger/logger.go
@@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"os"
@@ -182,7 +181,7 @@ var Instance Writer
// default log level
var logLevel = LogNone
-// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL.
+// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL.
// If no value was specified the default value is LogNone.
// Custom loggers can call this to retrieve the configured log level.
func Level() LevelType {
@@ -275,7 +274,7 @@ func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
}
if fl.shouldLogBody(req.Header, req.Body) {
// dump body
- body, err := ioutil.ReadAll(req.Body)
+ body, err := io.ReadAll(req.Body)
if err == nil {
fmt.Fprintln(b, string(filter.processBody(body)))
if nc, ok := req.Body.(io.Seeker); ok {
@@ -283,7 +282,7 @@ func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
nc.Seek(0, io.SeekStart)
} else {
// recreate the body
- req.Body = ioutil.NopCloser(bytes.NewReader(body))
+ req.Body = io.NopCloser(bytes.NewReader(body))
}
} else {
fmt.Fprintf(b, "failed to read body: %v\n", err)
@@ -310,10 +309,10 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
if fl.shouldLogBody(resp.Header, resp.Body) {
// dump body
defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err == nil {
fmt.Fprintln(b, string(filter.processBody(body)))
- resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+ resp.Body = io.NopCloser(bytes.NewReader(body))
} else {
fmt.Fprintf(b, "failed to read body: %v\n", err)
}
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go
index e163975cd..213692575 100644
--- a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go
+++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go
@@ -1,3 +1,4 @@
+//go:build modhack
// +build modhack
package tracing
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index ff499fb66..304edc342 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -39,9 +39,11 @@ var (
)
// semVerRegex is the regular expression used to parse a semantic version.
-const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
- `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
- `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+ `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+ `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
// Version represents a single semantic version.
type Version struct {
@@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) {
}
sv := &Version{
- metadata: m[8],
- pre: m[5],
+ metadata: m[5],
+ pre: m[4],
original: v,
}
@@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) {
}
if m[2] != "" {
- sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+ sv.minor, err = strconv.ParseUint(m[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) {
}
if m[3] != "" {
- sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+ sv.patch, err = strconv.ParseUint(m[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("Error parsing version segment: %s", err)
}
@@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool {
func validatePrerelease(p string) error {
eparts := strings.Split(p, ".")
for _, p := range eparts {
- if containsOnly(p, num) {
+ if p == "" {
+ return ErrInvalidMetadata
+ } else if containsOnly(p, num) {
if len(p) > 1 && p[0] == '0' {
return ErrSegmentStartsZero
}
@@ -631,7 +635,9 @@ func validatePrerelease(p string) error {
func validateMetadata(m string) error {
eparts := strings.Split(m, ".")
for _, p := range eparts {
- if !containsOnly(p, allowed) {
+ if p == "" {
+ return ErrInvalidMetadata
+ } else if !containsOnly(p, allowed) {
return ErrInvalidMetadata
}
}
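
The regex rewrite above switches to non-capturing groups, which is why the capture indices shift (`m[8]`/`m[5]` become `m[5]`/`m[4]`) and the `strings.TrimPrefix` calls disappear: the dots are no longer captured alongside the digits. Behavior-wise it still accepts loose versions, while the new empty-identifier checks tighten validation. A hedged sketch of the observable effects:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// Loose handling: a missing patch (or minor) segment still parses.
	v, err := semver.NewVersion("2.1")
	fmt.Println(v, err) // 2.1.0 <nil>

	// Prerelease and build metadata land in the renumbered groups.
	v, _ = semver.NewVersion("v1.2.3-rc.1+build.5")
	fmt.Println(v.Prerelease(), v.Metadata()) // rc.1 build.5

	// Empty dot-separated identifiers are now rejected.
	_, err = semver.NewVersion("1.2.3-rc..1")
	fmt.Println(err != nil) // true
}
```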
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
deleted file mode 100644
index 52cf18e42..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright 2021 The ANTLR Project
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- 3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
deleted file mode 100644
index ab5121267..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Package antlr implements the Go version of the ANTLR 4 runtime.
-
-# The ANTLR Tool
-
-ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
-or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
-From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
-(or visitor) that makes it easy to respond to the recognition of phrases of interest.
-
-# Code Generation
-
-ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
-runtime library, written specifically to support the generated code in the target language. This library is the
-runtime for the Go target.
-
-To generate code for the go target, it is generally recommended to place the source grammar files in a package of
-their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
-it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
-that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other
-way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
-your IDE, or configuration in your CI system.
-
-Here is a general template for an ANTLR based recognizer in Go:
-
- .
- ├── myproject
- ├── parser
- │ ├── mygrammar.g4
- │ ├── antlr-4.12.0-complete.jar
- │ ├── error_listeners.go
- │ ├── generate.go
- │ ├── generate.sh
- ├── go.mod
- ├── go.sum
- ├── main.go
- └── main_test.go
-
-Make sure that the package statement in your grammar file(s) reflects the go package they exist in.
-The generate.go file then looks like this:
-
- package parser
-
- //go:generate ./generate.sh
-
-And the generate.sh file will look similar to this:
-
- #!/bin/sh
-
- alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
- antlr4 -Dlanguage=Go -no-visitor -package parser *.g4
-
-depending on whether you want visitors or listeners or any other ANTLR options.
-
-From the command line at the root of your package “myproject” you can then simply issue the command:
-
- go generate ./...
-
-# Copyright Notice
-
-Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-
-Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
-
-[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
-[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
-*/
-package antlr
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
deleted file mode 100644
index 7619fa172..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
-// context). The syntactic context is a graph-structured stack node whose
-// path(s) to the root is the rule invocation(s) chain used to arrive at the
-// state. The semantic context is the tree of semantic predicates encountered
-// before reaching an ATN state.
-type ATNConfig interface {
- Equals(o Collectable[ATNConfig]) bool
- Hash() int
-
- GetState() ATNState
- GetAlt() int
- GetSemanticContext() SemanticContext
-
- GetContext() PredictionContext
- SetContext(PredictionContext)
-
- GetReachesIntoOuterContext() int
- SetReachesIntoOuterContext(int)
-
- String() string
-
- getPrecedenceFilterSuppressed() bool
- setPrecedenceFilterSuppressed(bool)
-}
-
-type BaseATNConfig struct {
- precedenceFilterSuppressed bool
- state ATNState
- alt int
- context PredictionContext
- semanticContext SemanticContext
- reachesIntoOuterContext int
-}
-
-func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup
- return &BaseATNConfig{
- state: old.state,
- alt: old.alt,
- context: old.context,
- semanticContext: old.semanticContext,
- reachesIntoOuterContext: old.reachesIntoOuterContext,
- }
-}
-
-func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
- return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
-}
-
-func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil") // TODO: Necessary?
- }
-
- return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
-}
-
-func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
- return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
-}
-
-func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
- return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
-}
-
-func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
- return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
-}
-
-func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
- return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
-}
-
-func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
- if semanticContext == nil {
- panic("semanticContext cannot be nil")
- }
-
- return &BaseATNConfig{
- state: state,
- alt: c.GetAlt(),
- context: context,
- semanticContext: semanticContext,
- reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
- precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
- }
-}
-
-func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
- return b.precedenceFilterSuppressed
-}
-
-func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
- b.precedenceFilterSuppressed = v
-}
-
-func (b *BaseATNConfig) GetState() ATNState {
- return b.state
-}
-
-func (b *BaseATNConfig) GetAlt() int {
- return b.alt
-}
-
-func (b *BaseATNConfig) SetContext(v PredictionContext) {
- b.context = v
-}
-func (b *BaseATNConfig) GetContext() PredictionContext {
- return b.context
-}
-
-func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
- return b.semanticContext
-}
-
-func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
- return b.reachesIntoOuterContext
-}
-
-func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
- b.reachesIntoOuterContext = v
-}
-
-// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
-// for a collection.
-//
-// An ATN configuration is equal to another if both have the same state, they
-// predict the same alternative, and syntactic/semantic contexts are the same.
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool {
- if b == o {
- return true
- } else if o == nil {
- return false
- }
-
- var other, ok = o.(*BaseATNConfig)
-
- if !ok {
- return false
- }
-
- var equal bool
-
- if b.context == nil {
- equal = other.context == nil
- } else {
- equal = b.context.Equals(other.context)
- }
-
- var (
- nums = b.state.GetStateNumber() == other.state.GetStateNumber()
- alts = b.alt == other.alt
- cons = b.semanticContext.Equals(other.semanticContext)
- sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
- )
-
- return nums && alts && cons && sups && equal
-}
-
-// Hash is the default hash function for BaseATNConfig, when no specialist hash function
-// is required for a collection
-func (b *BaseATNConfig) Hash() int {
- var c int
- if b.context != nil {
- c = b.context.Hash()
- }
-
- h := murmurInit(7)
- h = murmurUpdate(h, b.state.GetStateNumber())
- h = murmurUpdate(h, b.alt)
- h = murmurUpdate(h, c)
- h = murmurUpdate(h, b.semanticContext.Hash())
- return murmurFinish(h, 4)
-}
-
-func (b *BaseATNConfig) String() string {
- var s1, s2, s3 string
-
- if b.context != nil {
- s1 = ",[" + fmt.Sprint(b.context) + "]"
- }
-
- if b.semanticContext != SemanticContextNone {
- s2 = "," + fmt.Sprint(b.semanticContext)
- }
-
- if b.reachesIntoOuterContext > 0 {
- s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
- }
-
- return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
-}
-
-type LexerATNConfig struct {
- *BaseATNConfig
- lexerActionExecutor *LexerActionExecutor
- passedThroughNonGreedyDecision bool
-}
-
-func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
-}
-
-func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
- lexerActionExecutor: lexerActionExecutor,
- }
-}
-
-func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
- lexerActionExecutor: c.lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
- lexerActionExecutor: lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{
- BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
- lexerActionExecutor: c.lexerActionExecutor,
- passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
- }
-}
-
-func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
- return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
-}
-
-// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (l *LexerATNConfig) Hash() int {
- var f int
- if l.passedThroughNonGreedyDecision {
- f = 1
- } else {
- f = 0
- }
- h := murmurInit(7)
- h = murmurUpdate(h, l.state.GetStateNumber())
- h = murmurUpdate(h, l.alt)
- h = murmurUpdate(h, l.context.Hash())
- h = murmurUpdate(h, l.semanticContext.Hash())
- h = murmurUpdate(h, f)
- h = murmurUpdate(h, l.lexerActionExecutor.Hash())
- h = murmurFinish(h, 6)
- return h
-}
-
-// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via
-// the default comparator [ObjEqComparator].
-func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool {
- if l == other {
- return true
- }
- var othert, ok = other.(*LexerATNConfig)
-
- if l == other {
- return true
- } else if !ok {
- return false
- } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
- return false
- }
-
- var b bool
-
- if l.lexerActionExecutor != nil {
- b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor)
- } else {
- b = othert.lexerActionExecutor != nil
- }
-
- if b {
- return false
- }
-
- return l.BaseATNConfig.Equals(othert.BaseATNConfig)
-}
-
-func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
- var ds, ok = target.(DecisionState)
-
- return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
deleted file mode 100644
index 43e9b33f3..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go
+++ /dev/null
@@ -1,441 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
-)
-
-type ATNConfigSet interface {
- Hash() int
- Equals(o Collectable[ATNConfig]) bool
- Add(ATNConfig, *DoubleDict) bool
- AddAll([]ATNConfig) bool
-
- GetStates() *JStore[ATNState, Comparator[ATNState]]
- GetPredicates() []SemanticContext
- GetItems() []ATNConfig
-
- OptimizeConfigs(interpreter *BaseATNSimulator)
-
- Length() int
- IsEmpty() bool
- Contains(ATNConfig) bool
- ContainsFast(ATNConfig) bool
- Clear()
- String() string
-
- HasSemanticContext() bool
- SetHasSemanticContext(v bool)
-
- ReadOnly() bool
- SetReadOnly(bool)
-
- GetConflictingAlts() *BitSet
- SetConflictingAlts(*BitSet)
-
- Alts() *BitSet
-
- FullContext() bool
-
- GetUniqueAlt() int
- SetUniqueAlt(int)
-
- GetDipsIntoOuterContext() bool
- SetDipsIntoOuterContext(bool)
-}
-
-// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
-// about its elements and can combine similar configurations using a
-// graph-structured stack.
-type BaseATNConfigSet struct {
- cachedHash int
-
- // configLookup is used to determine whether two BaseATNConfigSets are equal. We
- // need all configurations with the same (s, i, _, semctx) to be equal. A key
- // effectively doubles the number of objects associated with ATNConfigs. All
- // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
- // read-only because a set becomes a DFA state.
- configLookup *JStore[ATNConfig, Comparator[ATNConfig]]
-
- // configs is the added elements.
- configs []ATNConfig
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves recomputation. Can we track conflicts as they
- // are added to save scanning configs later?
- conflictingAlts *BitSet
-
- // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
- // we hit a pred while computing a closure operation. Do not make a DFA state
- // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
- dipsIntoOuterContext bool
-
- // fullCtx is whether it is part of a full context LL prediction. Used to
- // determine how to merge $. It is a wildcard with SLL, but not for an LL
- // context merge.
- fullCtx bool
-
- // Used in parser and lexer. In lexer, it indicates we hit a pred
- // while computing a closure operation. Don't make a DFA state from a.
- hasSemanticContext bool
-
- // readOnly is whether it is read-only. Do not
- // allow any code to manipulate the set if true because DFA states will point at
- // sets and those must not change. If not, protect other fields; conflictingAlts
- // in particular, which is assigned after readOnly.
- readOnly bool
-
- // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
- // info together because it saves recomputation. Can we track conflicts as they
- // are added to save scanning configs later?
- uniqueAlt int
-}
-
-func (b *BaseATNConfigSet) Alts() *BitSet {
- alts := NewBitSet()
- for _, it := range b.configs {
- alts.add(it.GetAlt())
- }
- return alts
-}
-
-func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
- return &BaseATNConfigSet{
- cachedHash: -1,
- configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst),
- fullCtx: fullCtx,
- }
-}
-
-// Add merges contexts with existing configs for (s, i, pi, _), where s is the
-// ATNConfig.state, i is the ATNConfig.alt, and pi is the
-// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
-// dipsIntoOuterContext and hasSemanticContext when necessary.
-func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if config.GetSemanticContext() != SemanticContextNone {
- b.hasSemanticContext = true
- }
-
- if config.GetReachesIntoOuterContext() > 0 {
- b.dipsIntoOuterContext = true
- }
-
- existing, present := b.configLookup.Put(config)
-
- // The config was not already in the set
- //
- if !present {
- b.cachedHash = -1
- b.configs = append(b.configs, config) // Track order here
- return true
- }
-
- // Merge a previous (s, i, pi, _) with it and save the result
- rootIsWildcard := !b.fullCtx
- merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
-
- // No need to check for existing.context because config.context is in the cache,
- // since the only way to create new graphs is the "call rule" and here. We cache
- // at both places.
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
-
- // Preserve the precedence filter suppression during the merge
- if config.getPrecedenceFilterSuppressed() {
- existing.setPrecedenceFilterSuppressed(true)
- }
-
- // Replace the context because there is no need to do alt mapping
- existing.SetContext(merged)
-
- return true
-}
-
-func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
-
- // states uses the standard comparator provided by the ATNState instance
- //
- states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst)
-
- for i := 0; i < len(b.configs); i++ {
- states.Put(b.configs[i].GetState())
- }
-
- return states
-}
-
-func (b *BaseATNConfigSet) HasSemanticContext() bool {
- return b.hasSemanticContext
-}
-
-func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
- b.hasSemanticContext = v
-}
-
-func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
- preds := make([]SemanticContext, 0)
-
- for i := 0; i < len(b.configs); i++ {
- c := b.configs[i].GetSemanticContext()
-
- if c != SemanticContextNone {
- preds = append(preds, c)
- }
- }
-
- return preds
-}
-
-func (b *BaseATNConfigSet) GetItems() []ATNConfig {
- return b.configs
-}
-
-func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
- if b.readOnly {
- panic("set is read-only")
- }
-
- if b.configLookup.Len() == 0 {
- return
- }
-
- for i := 0; i < len(b.configs); i++ {
- config := b.configs[i]
-
- config.SetContext(interpreter.getCachedContext(config.GetContext()))
- }
-}
-
-func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
- for i := 0; i < len(coll); i++ {
- b.Add(coll[i], nil)
- }
-
- return false
-}
-
-// Compare is a hack function just to verify that adding DFA states to the known
-// set works, so long as comparison of ATNConfigSets works. For that to work, we
-// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't
-// know the order, so we do this inefficient hack. If this proves the point, then
-// we can change the config set to a better structure.
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool {
- if len(b.configs) != len(bs.configs) {
- return false
- }
-
- for _, c := range b.configs {
- found := false
- for _, c2 := range bs.configs {
- if c.Equals(c2) {
- found = true
- break
- }
- }
-
- if !found {
- return false
- }
-
- }
- return true
-}
-
-func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
- if b == other {
- return true
- } else if _, ok := other.(*BaseATNConfigSet); !ok {
- return false
- }
-
- other2 := other.(*BaseATNConfigSet)
-
- return b.configs != nil &&
- b.fullCtx == other2.fullCtx &&
- b.uniqueAlt == other2.uniqueAlt &&
- b.conflictingAlts == other2.conflictingAlts &&
- b.hasSemanticContext == other2.hasSemanticContext &&
- b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
- b.Compare(other2)
-}
-
-func (b *BaseATNConfigSet) Hash() int {
- if b.readOnly {
- if b.cachedHash == -1 {
- b.cachedHash = b.hashCodeConfigs()
- }
-
- return b.cachedHash
- }
-
- return b.hashCodeConfigs()
-}
-
-func (b *BaseATNConfigSet) hashCodeConfigs() int {
- h := 1
- for _, config := range b.configs {
- h = 31*h + config.Hash()
- }
- return h
-}
-
-func (b *BaseATNConfigSet) Length() int {
- return len(b.configs)
-}
-
-func (b *BaseATNConfigSet) IsEmpty() bool {
- return len(b.configs) == 0
-}
-
-func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
- if b.configLookup == nil {
- panic("not implemented for read-only sets")
- }
-
- return b.configLookup.Contains(item)
-}
-
-func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
- if b.configLookup == nil {
- panic("not implemented for read-only sets")
- }
-
- return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
-}
-
-func (b *BaseATNConfigSet) Clear() {
- if b.readOnly {
- panic("set is read-only")
- }
-
- b.configs = make([]ATNConfig, 0)
- b.cachedHash = -1
- b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
-}
-
-func (b *BaseATNConfigSet) FullContext() bool {
- return b.fullCtx
-}
-
-func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
- return b.dipsIntoOuterContext
-}
-
-func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
- b.dipsIntoOuterContext = v
-}
-
-func (b *BaseATNConfigSet) GetUniqueAlt() int {
- return b.uniqueAlt
-}
-
-func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
- b.uniqueAlt = v
-}
-
-func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
- return b.conflictingAlts
-}
-
-func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
- b.conflictingAlts = v
-}
-
-func (b *BaseATNConfigSet) ReadOnly() bool {
- return b.readOnly
-}
-
-func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
- b.readOnly = readOnly
-
- if readOnly {
- b.configLookup = nil // Read only, so no need for the lookup cache
- }
-}
-
-func (b *BaseATNConfigSet) String() string {
- s := "["
-
- for i, c := range b.configs {
- s += c.String()
-
- if i != len(b.configs)-1 {
- s += ", "
- }
- }
-
- s += "]"
-
- if b.hasSemanticContext {
- s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
- }
-
- if b.uniqueAlt != ATNInvalidAltNumber {
- s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
- }
-
- if b.conflictingAlts != nil {
- s += ",conflictingAlts=" + b.conflictingAlts.String()
- }
-
- if b.dipsIntoOuterContext {
- s += ",dipsIntoOuterContext"
- }
-
- return s
-}
-
-type OrderedATNConfigSet struct {
- *BaseATNConfigSet
-}
-
-func NewOrderedATNConfigSet() *OrderedATNConfigSet {
- b := NewBaseATNConfigSet(false)
-
- // This set uses the standard Hash() and Equals() from ATNConfig
- b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
-
- return &OrderedATNConfigSet{BaseATNConfigSet: b}
-}
-
-func hashATNConfig(i interface{}) int {
- o := i.(ATNConfig)
- hash := 7
- hash = 31*hash + o.GetState().GetStateNumber()
- hash = 31*hash + o.GetAlt()
- hash = 31*hash + o.GetSemanticContext().Hash()
- return hash
-}
-
-func equalATNConfigs(a, b interface{}) bool {
- if a == nil || b == nil {
- return false
- }
-
- if a == b {
- return true
- }
-
- var ai, ok = a.(ATNConfig)
- var bi, ok1 = b.(ATNConfig)
-
- if !ok || !ok1 {
- return false
- }
-
- if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
- return false
- }
-
- if ai.GetAlt() != bi.GetAlt() {
- return false
- }
-
- return ai.GetSemanticContext().Equals(bi.GetSemanticContext())
-}
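The deleted `hashCodeConfigs` above folds each config's hash into an order-dependent, Java-style 31-multiplier accumulator, and `Hash` memoizes the result only once the set has been marked read-only. A minimal standalone sketch of that fold-and-freeze pattern, with invented `int` stand-ins for the per-config hashes:

```go
package main

import "fmt"

// hashedSet caches an order-dependent 31-multiplier fold of its elements'
// hashes once it is frozen, mirroring the deleted BaseATNConfigSet.Hash
// and hashCodeConfigs pair.
type hashedSet struct {
	items      []int // stand-ins for per-element Hash() values
	readOnly   bool
	cachedHash int
}

func (s *hashedSet) hashItems() int {
	h := 1
	for _, x := range s.items {
		h = 31*h + x // order matters: [a,b] and [b,a] hash differently
	}
	return h
}

func (s *hashedSet) Hash() int {
	if s.readOnly {
		if s.cachedHash == -1 {
			s.cachedHash = s.hashItems() // compute once after freezing
		}
		return s.cachedHash
	}
	return s.hashItems() // mutable set: recompute every time
}

func main() {
	s := &hashedSet{items: []int{7, 11, 13}, cachedHash: -1}
	fmt.Println(s.Hash()) // recomputed (still mutable)
	s.readOnly = true
	fmt.Println(s.Hash()) // computed and cached
	fmt.Println(s.Hash()) // served from the cache
}
```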
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
deleted file mode 100644
index 41529115f..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
-
-type IATNSimulator interface {
- SharedContextCache() *PredictionContextCache
- ATN() *ATN
- DecisionToDFA() []*DFA
-}
-
-type BaseATNSimulator struct {
- atn *ATN
- sharedContextCache *PredictionContextCache
- decisionToDFA []*DFA
-}
-
-func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
- b := new(BaseATNSimulator)
-
- b.atn = atn
- b.sharedContextCache = sharedContextCache
-
- return b
-}
-
-func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
- if b.sharedContextCache == nil {
- return context
- }
-
- visited := make(map[PredictionContext]PredictionContext)
-
- return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
-}
-
-func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
- return b.sharedContextCache
-}
-
-func (b *BaseATNSimulator) ATN() *ATN {
- return b.atn
-}
-
-func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
- return b.decisionToDFA
-}
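`getCachedContext` above threads a `visited` map through the shared-cache lookup so that each prediction-context node is canonicalized at most once per call. A loose standalone sketch of that visited-map interning idea over a toy linked structure (`node`, `cache`, and `canonical` are invented names; the real walk operates on PredictionContext graphs):

```go
package main

import "fmt"

type node struct {
	label  string
	parent *node
}

// cache interns nodes by label, standing in for PredictionContextCache.
type cache map[string]*node

// canonical returns one shared instance per label, walking parents once;
// visited prevents rework when the same node is reached twice.
func canonical(n *node, c cache, visited map[*node]*node) *node {
	if n == nil {
		return nil
	}
	if got, ok := visited[n]; ok {
		return got
	}
	if got, ok := c[n.label]; ok {
		visited[n] = got
		return got
	}
	out := &node{label: n.label, parent: canonical(n.parent, c, visited)}
	c[n.label] = out
	visited[n] = out
	return out
}

func main() {
	root := &node{label: "root"}
	a := &node{label: "a", parent: root}
	b := &node{label: "a", parent: &node{label: "root"}} // structurally equal copy
	c, v := cache{}, map[*node]*node{}
	fmt.Println(canonical(a, c, v) == canonical(b, c, v)) // true: interned
}
```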
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
deleted file mode 100644
index 1f2a56bc3..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "strconv"
-
-// Constants for serialization.
-const (
- ATNStateInvalidType = 0
- ATNStateBasic = 1
- ATNStateRuleStart = 2
- ATNStateBlockStart = 3
- ATNStatePlusBlockStart = 4
- ATNStateStarBlockStart = 5
- ATNStateTokenStart = 6
- ATNStateRuleStop = 7
- ATNStateBlockEnd = 8
- ATNStateStarLoopBack = 9
- ATNStateStarLoopEntry = 10
- ATNStatePlusLoopBack = 11
- ATNStateLoopEnd = 12
-
- ATNStateInvalidStateNumber = -1
-)
-
-var ATNStateInitialNumTransitions = 4
-
-type ATNState interface {
- GetEpsilonOnlyTransitions() bool
-
- GetRuleIndex() int
- SetRuleIndex(int)
-
- GetNextTokenWithinRule() *IntervalSet
- SetNextTokenWithinRule(*IntervalSet)
-
- GetATN() *ATN
- SetATN(*ATN)
-
- GetStateType() int
-
- GetStateNumber() int
- SetStateNumber(int)
-
- GetTransitions() []Transition
- SetTransitions([]Transition)
- AddTransition(Transition, int)
-
- String() string
- Hash() int
- Equals(Collectable[ATNState]) bool
-}
-
-type BaseATNState struct {
- // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
- NextTokenWithinRule *IntervalSet
-
- // atn is the current ATN.
- atn *ATN
-
- epsilonOnlyTransitions bool
-
- // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
- ruleIndex int
-
- stateNumber int
-
- stateType int
-
- // Track the transitions emanating from this ATN state.
- transitions []Transition
-}
-
-func NewBaseATNState() *BaseATNState {
- return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
-}
-
-func (as *BaseATNState) GetRuleIndex() int {
- return as.ruleIndex
-}
-
-func (as *BaseATNState) SetRuleIndex(v int) {
- as.ruleIndex = v
-}
-func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
- return as.epsilonOnlyTransitions
-}
-
-func (as *BaseATNState) GetATN() *ATN {
- return as.atn
-}
-
-func (as *BaseATNState) SetATN(atn *ATN) {
- as.atn = atn
-}
-
-func (as *BaseATNState) GetTransitions() []Transition {
- return as.transitions
-}
-
-func (as *BaseATNState) SetTransitions(t []Transition) {
- as.transitions = t
-}
-
-func (as *BaseATNState) GetStateType() int {
- return as.stateType
-}
-
-func (as *BaseATNState) GetStateNumber() int {
- return as.stateNumber
-}
-
-func (as *BaseATNState) SetStateNumber(stateNumber int) {
- as.stateNumber = stateNumber
-}
-
-func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
- return as.NextTokenWithinRule
-}
-
-func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
- as.NextTokenWithinRule = v
-}
-
-func (as *BaseATNState) Hash() int {
- return as.stateNumber
-}
-
-func (as *BaseATNState) String() string {
- return strconv.Itoa(as.stateNumber)
-}
-
-func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
- if ot, ok := other.(ATNState); ok {
- return as.stateNumber == ot.GetStateNumber()
- }
-
- return false
-}
-
-func (as *BaseATNState) isNonGreedyExitState() bool {
- return false
-}
-
-func (as *BaseATNState) AddTransition(trans Transition, index int) {
- if len(as.transitions) == 0 {
- as.epsilonOnlyTransitions = trans.getIsEpsilon()
- } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
- as.epsilonOnlyTransitions = false
- }
-
- if index == -1 {
- as.transitions = append(as.transitions, trans)
- } else {
- as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
- // TODO: as.transitions.splice(index, 1, trans)
- }
-}
-
-type BasicState struct {
- *BaseATNState
-}
-
-func NewBasicState() *BasicState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateBasic
-
- return &BasicState{BaseATNState: b}
-}
-
-type DecisionState interface {
- ATNState
-
- getDecision() int
- setDecision(int)
-
- getNonGreedy() bool
- setNonGreedy(bool)
-}
-
-type BaseDecisionState struct {
- *BaseATNState
- decision int
- nonGreedy bool
-}
-
-func NewBaseDecisionState() *BaseDecisionState {
- return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
-}
-
-func (s *BaseDecisionState) getDecision() int {
- return s.decision
-}
-
-func (s *BaseDecisionState) setDecision(b int) {
- s.decision = b
-}
-
-func (s *BaseDecisionState) getNonGreedy() bool {
- return s.nonGreedy
-}
-
-func (s *BaseDecisionState) setNonGreedy(b bool) {
- s.nonGreedy = b
-}
-
-type BlockStartState interface {
- DecisionState
-
- getEndState() *BlockEndState
- setEndState(*BlockEndState)
-}
-
-// BaseBlockStartState is the start of a regular (...) block.
-type BaseBlockStartState struct {
- *BaseDecisionState
- endState *BlockEndState
-}
-
-func NewBlockStartState() *BaseBlockStartState {
- return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
-}
-
-func (s *BaseBlockStartState) getEndState() *BlockEndState {
- return s.endState
-}
-
-func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
- s.endState = b
-}
-
-type BasicBlockStartState struct {
- *BaseBlockStartState
-}
-
-func NewBasicBlockStartState() *BasicBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStateBlockStart
-
- return &BasicBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &BasicBlockStartState{}
-
-// BlockEndState is a terminal node of a simple (a|b|c) block.
-type BlockEndState struct {
- *BaseATNState
- startState ATNState
-}
-
-func NewBlockEndState() *BlockEndState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateBlockEnd
-
- return &BlockEndState{BaseATNState: b}
-}
-
-// RuleStopState is the last node in the ATN for a rule, unless that rule is the
-// start symbol. In that case, there is one transition to EOF. Later, we might
-// encode references to all calls to this rule to compute FOLLOW sets for error
-// handling.
-type RuleStopState struct {
- *BaseATNState
-}
-
-func NewRuleStopState() *RuleStopState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateRuleStop
-
- return &RuleStopState{BaseATNState: b}
-}
-
-type RuleStartState struct {
- *BaseATNState
- stopState ATNState
- isPrecedenceRule bool
-}
-
-func NewRuleStartState() *RuleStartState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateRuleStart
-
- return &RuleStartState{BaseATNState: b}
-}
-
-// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
-// transitions: one to the loop back to start of the block, and one to exit.
-type PlusLoopbackState struct {
- *BaseDecisionState
-}
-
-func NewPlusLoopbackState() *PlusLoopbackState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStatePlusLoopBack
-
- return &PlusLoopbackState{BaseDecisionState: b}
-}
-
-// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
-// decision state; we don't use it for code generation. Somebody might need it,
-// it is included for completeness. In reality, PlusLoopbackState is the real
-// decision-making node for A+.
-type PlusBlockStartState struct {
- *BaseBlockStartState
- loopBackState ATNState
-}
-
-func NewPlusBlockStartState() *PlusBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStatePlusBlockStart
-
- return &PlusBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &PlusBlockStartState{}
-
-// StarBlockStartState is the block that begins a closure loop.
-type StarBlockStartState struct {
- *BaseBlockStartState
-}
-
-func NewStarBlockStartState() *StarBlockStartState {
- b := NewBlockStartState()
-
- b.stateType = ATNStateStarBlockStart
-
- return &StarBlockStartState{BaseBlockStartState: b}
-}
-
-var _ BlockStartState = &StarBlockStartState{}
-
-type StarLoopbackState struct {
- *BaseATNState
-}
-
-func NewStarLoopbackState() *StarLoopbackState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateStarLoopBack
-
- return &StarLoopbackState{BaseATNState: b}
-}
-
-type StarLoopEntryState struct {
- *BaseDecisionState
- loopBackState ATNState
- precedenceRuleDecision bool
-}
-
-func NewStarLoopEntryState() *StarLoopEntryState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStateStarLoopEntry
-
- // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making.
- return &StarLoopEntryState{BaseDecisionState: b}
-}
-
-// LoopEndState marks the end of a * or + loop.
-type LoopEndState struct {
- *BaseATNState
- loopBackState ATNState
-}
-
-func NewLoopEndState() *LoopEndState {
- b := NewBaseATNState()
-
- b.stateType = ATNStateLoopEnd
-
- return &LoopEndState{BaseATNState: b}
-}
-
-// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
-type TokensStartState struct {
- *BaseDecisionState
-}
-
-func NewTokensStartState() *TokensStartState {
- b := NewBaseDecisionState()
-
- b.stateType = ATNStateTokenStart
-
- return &TokensStartState{BaseDecisionState: b}
-}
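`AddTransition` above combines two small idioms: tracking whether a state's outgoing edges are all epsilon, and splicing a new element into the middle of a slice with the append-twice pattern. A self-contained sketch under simplified types:

```go
package main

import "fmt"

type transition struct {
	label     string
	isEpsilon bool
}

type state struct {
	transitions []transition
	epsilonOnly bool
}

// addTransition mirrors the deleted BaseATNState.AddTransition: track
// whether every outgoing edge is epsilon, then append or splice at index.
func (s *state) addTransition(t transition, index int) {
	if len(s.transitions) == 0 {
		s.epsilonOnly = t.isEpsilon
	} else if s.epsilonOnly != t.isEpsilon {
		s.epsilonOnly = false // mixed edge kinds: no longer epsilon-only
	}
	if index == -1 {
		s.transitions = append(s.transitions, t)
		return
	}
	// Splice: everything before index, the new element, then the tail.
	// The inner append copies the tail first, so no aliasing bug here.
	s.transitions = append(s.transitions[:index],
		append([]transition{t}, s.transitions[index:]...)...)
}

func main() {
	var s state
	s.addTransition(transition{label: "eps1", isEpsilon: true}, -1)
	s.addTransition(transition{label: "eps0", isEpsilon: true}, 0) // insert at front
	s.addTransition(transition{label: "a"}, -1)                    // non-epsilon edge
	fmt.Println(s.transitions, s.epsilonOnly)                      // ... false
}
```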
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
deleted file mode 100644
index f679f0dcd..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "os"
- "strconv"
-)
-
-// Provides an empty default implementation of {@link ANTLRErrorListener}. The
-// default implementation of each method does nothing, but can be overridden as
-// necessary.
-
-type ErrorListener interface {
- SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
- ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
- ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
- ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
-}
-
-type DefaultErrorListener struct {
-}
-
-func NewDefaultErrorListener() *DefaultErrorListener {
- return new(DefaultErrorListener)
-}
-
-func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
-}
-
-func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
-}
-
-func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
-}
-
-func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
-}
-
-type ConsoleErrorListener struct {
- *DefaultErrorListener
-}
-
-func NewConsoleErrorListener() *ConsoleErrorListener {
- return new(ConsoleErrorListener)
-}
-
-// Provides a default instance of {@link ConsoleErrorListener}.
-var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
-
-// {@inheritDoc}
-//
-//
-// This implementation prints messages to {@link System//err} containing the
-// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
-// the following format.
-//
-//
-// line <line>:<charPositionInLine> <msg>
-//
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
- fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
-}
-
-type ProxyErrorListener struct {
- *DefaultErrorListener
- delegates []ErrorListener
-}
-
-func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
- if delegates == nil {
- panic("delegates is not provided")
- }
- l := new(ProxyErrorListener)
- l.delegates = delegates
- return l
-}
-
-func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
- for _, d := range p.delegates {
- d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
- }
-}
-
-func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
- }
-}
-
-func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
- }
-}
-
-func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
- for _, d := range p.delegates {
- d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
- }
-}
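`ProxyErrorListener` above is a plain fan-out: every callback loops over the delegate slice and forwards its arguments. A minimal sketch of the same pattern over a trimmed-down, invented listener interface (the real one also carries recognizer, DFA, and config-set state):

```go
package main

import "fmt"

type listener interface {
	SyntaxError(line, column int, msg string)
}

type consoleListener struct{}

func (consoleListener) SyntaxError(line, column int, msg string) {
	fmt.Printf("line %d:%d %s\n", line, column, msg)
}

// proxy fans each event out to every delegate, like ProxyErrorListener.
type proxy struct{ delegates []listener }

func (p proxy) SyntaxError(line, column int, msg string) {
	for _, d := range p.delegates {
		d.SyntaxError(line, column, msg)
	}
}

func main() {
	var p listener = proxy{delegates: []listener{consoleListener{}, consoleListener{}}}
	p.SyntaxError(3, 14, "missing ')'") // printed once per delegate
}
```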
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
deleted file mode 100644
index 5c0a637ba..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go
+++ /dev/null
@@ -1,734 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-type ErrorStrategy interface {
- reset(Parser)
- RecoverInline(Parser) Token
- Recover(Parser, RecognitionException)
- Sync(Parser)
- InErrorRecoveryMode(Parser) bool
- ReportError(Parser, RecognitionException)
- ReportMatch(Parser)
-}
-
-// This is the default implementation of {@link ANTLRErrorStrategy} used for
-// error Reporting and recovery in ANTLR parsers.
-type DefaultErrorStrategy struct {
- errorRecoveryMode bool
- lastErrorIndex int
- lastErrorStates *IntervalSet
-}
-
-var _ ErrorStrategy = &DefaultErrorStrategy{}
-
-func NewDefaultErrorStrategy() *DefaultErrorStrategy {
-
- d := new(DefaultErrorStrategy)
-
- // Indicates whether the error strategy is currently "recovering from an
- // error". This is used to suppress Reporting multiple error messages while
- // attempting to recover from a detected syntax error.
- //
- // @see //InErrorRecoveryMode
- //
- d.errorRecoveryMode = false
-
- // The index into the input stream where the last error occurred.
- // This is used to prevent infinite loops where an error is found
- // but no token is consumed during recovery...another error is found,
- // ad nauseum. This is a failsafe mechanism to guarantee that at least
- // one token/tree node is consumed for two errors.
- //
- d.lastErrorIndex = -1
- d.lastErrorStates = nil
- return d
-}
-
-// The default implementation simply calls {@link //endErrorCondition} to
-// ensure that the handler is not in error recovery mode.
-func (d *DefaultErrorStrategy) reset(recognizer Parser) {
- d.endErrorCondition(recognizer)
-}
-
-// This method is called to enter error recovery mode when a recognition
-// exception is Reported.
-//
-// @param recognizer the parser instance
-func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
- d.errorRecoveryMode = true
-}
-
-func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
- return d.errorRecoveryMode
-}
-
-// This method is called to leave error recovery mode after recovering from
-// a recognition exception.
-//
-// @param recognizer
-func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
- d.errorRecoveryMode = false
- d.lastErrorStates = nil
- d.lastErrorIndex = -1
-}
-
-// {@inheritDoc}
-//
-// The default implementation simply calls {@link //endErrorCondition}.
-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
- d.endErrorCondition(recognizer)
-}
-
-// {@inheritDoc}
-//
-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.
-//
-//
-// - {@link NoViableAltException}: Dispatches the call to
-// {@link //ReportNoViableAlternative}
-// - {@link InputMisMatchException}: Dispatches the call to
-// {@link //ReportInputMisMatch}
-// - {@link FailedPredicateException}: Dispatches the call to
-// {@link //ReportFailedPredicate}
-// - All other types: calls {@link Parser//NotifyErrorListeners} to Report
-// the exception
-//
-func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
- // if we've already Reported an error and have not Matched a token
- // yet successfully, don't Report any errors.
- if d.InErrorRecoveryMode(recognizer) {
- return // don't Report spurious errors
- }
- d.beginErrorCondition(recognizer)
-
- switch t := e.(type) {
- default:
- fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
- // fmt.Println(e.stack)
- recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
- case *NoViableAltException:
- d.ReportNoViableAlternative(recognizer, t)
- case *InputMisMatchException:
- d.ReportInputMisMatch(recognizer, t)
- case *FailedPredicateException:
- d.ReportFailedPredicate(recognizer, t)
- }
-}
-
-// {@inheritDoc}
-//
-// The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.
-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
-
- if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
- d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
- // uh oh, another error at same token index and previously-Visited
- // state in ATN must be a case where LT(1) is in the recovery
- // token set so nothing got consumed. Consume a single token
- // at least to prevent an infinite loop d is a failsafe.
- recognizer.Consume()
- }
- d.lastErrorIndex = recognizer.GetInputStream().Index()
- if d.lastErrorStates == nil {
- d.lastErrorStates = NewIntervalSet()
- }
- d.lastErrorStates.addOne(recognizer.GetState())
- followSet := d.getErrorRecoverySet(recognizer)
- d.consumeUntil(recognizer, followSet)
-}
-
-// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
-// that the current lookahead symbol is consistent with what were expecting
-// at d point in the ATN. You can call d anytime but ANTLR only
-// generates code to check before subrules/loops and each iteration.
-//
-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,
-//
-//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-//
-//
-// At the start of a sub rule upon error, {@link //Sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// reSynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
-//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
-//
-// ORIGINS
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
-//
-//
-// classfunc : 'class' ID '{' member* '}'
-//
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.
-func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
- // If already recovering, don't try to Sync
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
-
- s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
- la := recognizer.GetTokenStream().LA(1)
-
- // try cheaper subset first might get lucky. seems to shave a wee bit off
- nextTokens := recognizer.GetATN().NextTokens(s, nil)
- if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
- return
- }
-
- switch s.GetStateType() {
- case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
- // Report error and recover if possible
- if d.SingleTokenDeletion(recognizer) != nil {
- return
- }
- panic(NewInputMisMatchException(recognizer))
- case ATNStatePlusLoopBack, ATNStateStarLoopBack:
- d.ReportUnwantedToken(recognizer)
- expecting := NewIntervalSet()
- expecting.addSet(recognizer.GetExpectedTokens())
- whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
- d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
- default:
- // do nothing if we can't identify the exact kind of ATN state
- }
-}
-
-// This is called by {@link //ReportError} when the exception is a
-// {@link NoViableAltException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
- tokens := recognizer.GetTokenStream()
- var input string
- if tokens != nil {
- if e.startToken.GetTokenType() == TokenEOF {
- input = "<EOF>"
- } else {
- input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
- }
- } else {
- input = "<unknown input>"
- }
- msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// This is called by {@link //ReportError} when the exception is an
-// {@link InputMisMatchException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
- msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
- " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// This is called by {@link //ReportError} when the exception is a
-// {@link FailedPredicateException}.
-//
-// @see //ReportError
-//
-// @param recognizer the parser instance
-// @param e the recognition exception
-func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
- ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
- msg := "rule " + ruleName + " " + e.message
- recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
-}
-
-// This method is called to Report a syntax error which requires the removal
-// of a token from the input stream. At the time d method is called, the
-// erroneous symbol is current {@code LT(1)} symbol and has not yet been
-// removed from the input stream. When d method returns,
-// {@code recognizer} is in error recovery mode.
-//
-// This method is called when {@link //singleTokenDeletion} identifies
-// single-token deletion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-//
-// @param recognizer the parser instance
-func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
- d.beginErrorCondition(recognizer)
- t := recognizer.GetCurrentToken()
- tokenName := d.GetTokenErrorDisplay(t)
- expecting := d.GetExpectedTokens(recognizer)
- msg := "extraneous input " + tokenName + " expecting " +
- expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
- recognizer.NotifyErrorListeners(msg, t, nil)
-}
-
-// This method is called to Report a syntax error which requires the
-// insertion of a missing token into the input stream. At the time d
-// method is called, the missing token has not yet been inserted. When d
-// method returns, {@code recognizer} is in error recovery mode.
-//
-// This method is called when {@link //singleTokenInsertion} identifies
-// single-token insertion as a viable recovery strategy for a mismatched
-// input error.
-//
-// The default implementation simply returns if the handler is already in
-// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
-// enter error recovery mode, followed by calling
-// {@link Parser//NotifyErrorListeners}.
-//
-// @param recognizer the parser instance
-func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
- if d.InErrorRecoveryMode(recognizer) {
- return
- }
- d.beginErrorCondition(recognizer)
- t := recognizer.GetCurrentToken()
- expecting := d.GetExpectedTokens(recognizer)
- msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
- " at " + d.GetTokenErrorDisplay(t)
- recognizer.NotifyErrorListeners(msg, t, nil)
-}
-
-// The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, d method panics an
-// {@link InputMisMatchException}.
-//
-// EXTRA TOKEN (single token deletion)
-//
-// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
-// right token, however, then assume {@code LA(1)} is some extra spurious
-// token and delete it. Then consume and return the next token (which was
-// the {@code LA(2)} token) as the successful result of the Match operation.
-//
-// This recovery strategy is implemented by {@link
-// //singleTokenDeletion}.
-//
-// MISSING TOKEN (single token insertion)
-//
-// If current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.
-//
-// This recovery strategy is implemented by {@link
-// //singleTokenInsertion}.
-//
-// EXAMPLE
-//
-// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// call chain:
-//
-//
-// stat &rarr expr &rarr atom
-//
-//
-// and it will be trying to Match the {@code ')'} at d point in the
-// derivation:
-//
-//
-// => ID '=' '(' INT ')' ('+' atom)* ”
-// ^
-//
-//
-// The attempt to Match {@code ')'} will fail when it sees {@code ”} and
-// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”}
-// is in the set of tokens that can follow the {@code ')'} token reference
-// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
-func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
- // SINGLE TOKEN DELETION
- MatchedSymbol := d.SingleTokenDeletion(recognizer)
- if MatchedSymbol != nil {
- // we have deleted the extra token.
- // now, move past ttype token as if all were ok
- recognizer.Consume()
- return MatchedSymbol
- }
- // SINGLE TOKEN INSERTION
- if d.SingleTokenInsertion(recognizer) {
- return d.GetMissingSymbol(recognizer)
- }
- // even that didn't work must panic the exception
- panic(NewInputMisMatchException(recognizer))
-}
-
-// This method implements the single-token insertion inline error recovery
-// strategy. It is called by {@link //recoverInline} if the single-token
-// deletion strategy fails to recover from the mismatched input. If this
-// method returns {@code true}, {@code recognizer} will be in error recovery
-// mode.
-//
-// This method determines whether or not single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If d method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce d behavior.
-//
-// @param recognizer the parser instance
-// @return {@code true} if single-token insertion is a viable recovery
-// strategy for the current mismatched input, otherwise {@code false}
-func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
- currentSymbolType := recognizer.GetTokenStream().LA(1)
- // if current token is consistent with what could come after current
- // ATN state, then we know we're missing a token error recovery
- // is free to conjure up and insert the missing token
- atn := recognizer.GetInterpreter().atn
- currentState := atn.states[recognizer.GetState()]
- next := currentState.GetTransitions()[0].getTarget()
- expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
- if expectingAtLL2.contains(currentSymbolType) {
- d.ReportMissingToken(recognizer)
- return true
- }
-
- return false
-}
-
-// This method implements the single-token deletion inline error recovery
-// strategy. It is called by {@link //recoverInline} to attempt to recover
-// from mismatched input. If this method returns nil, the parser and error
-// handler state will not have changed. If this method returns non-nil,
-// {@code recognizer} will not be in error recovery mode since the
-// returned token was a successful Match.
-//
-// If the single-token deletion is successful, d method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning {@link //ReportMatch} is called to signal a successful
-// Match.
-//
-// @param recognizer the parser instance
-// @return the successfully Matched {@link Token} instance if single-token
-// deletion successfully recovers from the mismatched input, otherwise
-// {@code nil}
-func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
- NextTokenType := recognizer.GetTokenStream().LA(2)
- expecting := d.GetExpectedTokens(recognizer)
- if expecting.contains(NextTokenType) {
- d.ReportUnwantedToken(recognizer)
- // print("recoverFromMisMatchedToken deleting " \
- // + str(recognizer.GetTokenStream().LT(1)) \
- // + " since " + str(recognizer.GetTokenStream().LT(2)) \
- // + " is what we want", file=sys.stderr)
- recognizer.Consume() // simply delete extra token
- // we want to return the token we're actually Matching
- MatchedSymbol := recognizer.GetCurrentToken()
- d.ReportMatch(recognizer) // we know current token is correct
- return MatchedSymbol
- }
-
- return nil
-}
-
-// Conjure up a missing token during error recovery.
-//
-// The recognizer attempts to recover from single missing
-// symbols. But, actions might refer to that missing symbol.
-// For example, x=ID {f($x)}. The action clearly assumes
-// that there has been an identifier Matched previously and that
-// $x points at that token. If that token is missing, but
-// the next token in the stream is what we want we assume that
-// d token is missing and we keep going. Because we
-// have to return some token to replace the missing token,
-// we have to conjure one up. This method gives the user control
-// over the tokens returned for missing tokens. Mostly,
-// you will want to create something special for identifier
-// tokens. For literals such as '{' and ',', the default
-// action in the parser or tree parser works. It simply creates
-// a CommonToken of the appropriate type. The text will be the token.
-// If you change what tokens must be created by the lexer,
-// override d method to create the appropriate tokens.
-func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
- currentSymbol := recognizer.GetCurrentToken()
- expecting := d.GetExpectedTokens(recognizer)
- expectedTokenType := expecting.first()
- var tokenText string
-
- if expectedTokenType == TokenEOF {
- tokenText = "<missing EOF>"
- } else {
- ln := recognizer.GetLiteralNames()
- if expectedTokenType > 0 && expectedTokenType < len(ln) {
- tokenText = "<missing " + recognizer.GetLiteralNames()[expectedTokenType] + ">"
- } else {
- tokenText = "<missing undefined>" // TODO matches the JS impl
- }
- }
- current := currentSymbol
- lookback := recognizer.GetTokenStream().LT(-1)
- if current.GetTokenType() == TokenEOF && lookback != nil {
- current = lookback
- }
-
- tf := recognizer.GetTokenFactory()
-
- return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
-}
-
-func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
- return recognizer.GetExpectedTokens()
-}
-
-// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects because you don't have to go modify your lexer
-// so that it creates a NewJava type.
-func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
- if t == nil {
- return "<no token>"
- }
- s := t.GetText()
- if s == "" {
- if t.GetTokenType() == TokenEOF {
- s = "<EOF>"
- } else {
- s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
- }
- }
- return d.escapeWSAndQuote(s)
-}
-
-func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
- return "'" + s + "'"
-}
-
-// Compute the error recovery set for the current rule. During
-// rule invocation, the parser pushes the set of tokens that can
-// follow that rule reference on the stack d amounts to
-// computing FIRST of what follows the rule reference in the
-// enclosing rule. See LinearApproximator.FIRST().
-// This local follow set only includes tokens
-// from within the rule i.e., the FIRST computation done by
-// ANTLR stops at the end of a rule.
-//
-// # EXAMPLE
-//
-// When you find a "no viable alt exception", the input is not
-// consistent with any of the alternatives for rule r. The best
-// thing to do is to consume tokens until you see something that
-// can legally follow a call to r//or* any rule that called r.
-// You don't want the exact set of viable next tokens because the
-// input might just be missing a token--you might consume the
-// rest of the input looking for one of the missing tokens.
-//
-// Consider grammar:
-//
-// a : '[' b ']'
-// | '(' b ')'
-//
-// b : c '^' INT
-// c : ID
-// | INT
-//
-// At each rule invocation, the set of tokens that could follow
-// that rule is pushed on a stack. Here are the various
-// context-sensitive follow sets:
-//
-// FOLLOW(b1_in_a) = FIRST(']') = ']'
-// FOLLOW(b2_in_a) = FIRST(')') = ')'
-// FOLLOW(c_in_b) = FIRST('^') = '^'
-//
-// Upon erroneous input "[]", the call chain is
-//
-// a -> b -> c
-//
-// and, hence, the follow context stack is:
-//
-// depth follow set start of rule execution
-// 0 a (from main())
-// 1 ']' b
-// 2 '^' c
-//
-// Notice that ')' is not included, because b would have to have
-// been called from a different context in rule a for ')' to be
-// included.
-//
-// For error recovery, we cannot consider FOLLOW(c)
-// (context-sensitive or otherwise). We need the combined set of
-// all context-sensitive FOLLOW sets--the set of all tokens that
-// could follow any reference in the call chain. We need to
-// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
-// we reSync'd to that token, we'd consume until EOF. We need to
-// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
-// In this case, for input "[]", LA(1) is ']' and in the set, so we would
-// not consume anything. After printing an error, rule c would
-// return normally. Rule b would not find the required '^' though.
-// At this point, it gets a mismatched token error and panics an
-// exception (since LA(1) is not in the viable following token
-// set). The rule exception handler tries to recover, but finds
-// the same recovery set and doesn't consume anything. Rule b
-// exits normally returning to rule a. Now it finds the ']' (and
-// with the successful Match exits errorRecovery mode).
-//
-// So, you can see that the parser walks up the call chain looking
-// for the token that was a member of the recovery set.
-//
-// Errors are not generated in errorRecovery mode.
-//
-// ANTLR's error recovery mechanism is based upon original ideas:
-//
-// "Algorithms + Data Structures = Programs" by Niklaus Wirth
-//
-// and
-//
-// "A note on error recovery in recursive descent parsers":
-// http://portal.acm.org/citation.cfm?id=947902.947905
-//
-// Later, Josef Grosch had some good ideas:
-//
-// "Efficient and Comfortable Error Recovery in Recursive Descent
-// Parsers":
-// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
-//
-// Like Grosch I implement context-sensitive FOLLOW sets that are combined
-// at run-time upon error to avoid overhead during parsing.
-func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
- atn := recognizer.GetInterpreter().atn
- ctx := recognizer.GetParserRuleContext()
- recoverSet := NewIntervalSet()
- for ctx != nil && ctx.GetInvokingState() >= 0 {
- // compute what follows who invoked us
- invokingState := atn.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
- follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
- recoverSet.addSet(follow)
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- recoverSet.removeOne(TokenEpsilon)
- return recoverSet
-}
-
-// Consume tokens until one Matches the given token set.//
-func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
- ttype := recognizer.GetTokenStream().LA(1)
- for ttype != TokenEOF && !set.contains(ttype) {
- recognizer.Consume()
- ttype = recognizer.GetTokenStream().LA(1)
- }
-}
-
-//
-// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
-// by immediately canceling the parse operation with a
-// {@link ParseCancellationException}. The implementation ensures that the
-// {@link ParserRuleContext//exception} field is set for all parse tree nodes
-// that were not completed prior to encountering the error.
-//
-//
-// This error strategy is useful in the following scenarios.
-//
-//
-// - Two-stage parsing: This error strategy allows the first
-// stage of two-stage parsing to immediately terminate if an error is
-// encountered, and immediately fall back to the second stage. In addition to
-// avoiding wasted work by attempting to recover from errors here, the empty
-// implementation of {@link BailErrorStrategy//Sync} improves the performance of
-// the first stage.
-// - Silent validation: When syntax errors are not being
-// Reported or logged, and the parse result is simply ignored if errors occur,
-// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-// when the result will be ignored either way.
-//
-//
-//
-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
-//
-// @see Parser//setErrorHandler(ANTLRErrorStrategy)
-
-type BailErrorStrategy struct {
- *DefaultErrorStrategy
-}
-
-var _ ErrorStrategy = &BailErrorStrategy{}
-
-func NewBailErrorStrategy() *BailErrorStrategy {
-
- b := new(BailErrorStrategy)
-
- b.DefaultErrorStrategy = NewDefaultErrorStrategy()
-
- return b
-}
-
-// Instead of recovering from exception {@code e}, re-panic it wrapped
-// in a {@link ParseCancellationException} so it is not caught by the
-// rule func catches. Use {@link Exception//getCause()} to get the
-// original {@link RecognitionException}.
-func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
- context := recognizer.GetParserRuleContext()
- for context != nil {
- context.SetException(e)
- if parent, ok := context.GetParent().(ParserRuleContext); ok {
- context = parent
- } else {
- context = nil
- }
- }
- panic(NewParseCancellationException()) // TODO we don't emit e properly
-}
-
-// Make sure we don't attempt to recover inline if the parser
-// successfully recovers, it won't panic an exception.
-func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
- b.Recover(recognizer, NewInputMisMatchException(recognizer))
-
- return nil
-}
-
-// Make sure we don't attempt to recover from problems in subrules.//
-func (b *BailErrorStrategy) Sync(recognizer Parser) {
- // pass
-}
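The heart of the deleted default strategy is panic-mode resynchronization: on error, compute a recovery set from the rule-invocation stack, then have `consumeUntil` skip tokens until `LA(1)` is EOF or lands in that set. A standalone sketch of that skip loop over a toy token stream (token codes invented):

```go
package main

import "fmt"

const tokenEOF = -1

type stream struct {
	tokens []int
	pos    int
}

func (s *stream) la1() int {
	if s.pos >= len(s.tokens) {
		return tokenEOF
	}
	return s.tokens[s.pos]
}

func (s *stream) consume() { s.pos++ }

// consumeUntil skips tokens until LA(1) is EOF or a member of the
// recovery set, mirroring DefaultErrorStrategy.consumeUntil.
func consumeUntil(s *stream, set map[int]bool) {
	for t := s.la1(); t != tokenEOF && !set[t]; t = s.la1() {
		s.consume()
	}
}

func main() {
	// Pretend token code 99 (say, ';') is the only safe resync point.
	s := &stream{tokens: []int{4, 8, 15, 99, 16}}
	consumeUntil(s, map[int]bool{99: true})
	fmt.Println(s.pos, s.la1()) // 3 99: stopped on the follow-set token
}
```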
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
deleted file mode 100644
index 3954c1378..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
-// 3 kinds of errors: prediction errors, failed predicate errors, and
-// mismatched input errors. In each case, the parser knows where it is
-// in the input, where it is in the ATN, the rule invocation stack,
-// and what kind of problem occurred.
-
-type RecognitionException interface {
- GetOffendingToken() Token
- GetMessage() string
- GetInputStream() IntStream
-}
-
-type BaseRecognitionException struct {
- message string
- recognizer Recognizer
- offendingToken Token
- offendingState int
- ctx RuleContext
- input IntStream
-}
-
-func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
-
- // todo
- // Error.call(this)
- //
- // if (!!Error.captureStackTrace) {
- // Error.captureStackTrace(this, RecognitionException)
- // } else {
- // stack := NewError().stack
- // }
- // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
-
- t := new(BaseRecognitionException)
-
- t.message = message
- t.recognizer = recognizer
- t.input = input
- t.ctx = ctx
- // The current {@link Token} when an error occurred. Since not all streams
- // support accessing symbols by index, we have to track the {@link Token}
- // instance itself.
- t.offendingToken = nil
- // Get the ATN state number the parser was in at the time the error
- // occurred. For {@link NoViableAltException} and
- // {@link LexerNoViableAltException} exceptions, this is the
- // {@link DecisionState} number. For others, it is the state whose outgoing
- // edge we couldn't Match.
- t.offendingState = -1
- if t.recognizer != nil {
- t.offendingState = t.recognizer.GetState()
- }
-
- return t
-}
-
-func (b *BaseRecognitionException) GetMessage() string {
- return b.message
-}
-
-func (b *BaseRecognitionException) GetOffendingToken() Token {
- return b.offendingToken
-}
-
-func (b *BaseRecognitionException) GetInputStream() IntStream {
- return b.input
-}
-
-// If the state number is not known, b method returns -1.
-
-// Gets the set of input symbols which could potentially follow the
-// previously Matched symbol at the time b exception was panicn.
-//
-// If the set of expected tokens is not known and could not be computed,
-// b method returns {@code nil}.
-//
-// @return The set of token types that could potentially follow the current
-// state in the ATN, or {@code nil} if the information is not available.
-// /
-func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
- if b.recognizer != nil {
- return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
- }
-
- return nil
-}
-
-func (b *BaseRecognitionException) String() string {
- return b.message
-}
-
-type LexerNoViableAltException struct {
- *BaseRecognitionException
-
- startIndex int
- deadEndConfigs ATNConfigSet
-}
-
-func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
-
- l := new(LexerNoViableAltException)
-
- l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
-
- l.startIndex = startIndex
- l.deadEndConfigs = deadEndConfigs
-
- return l
-}
-
-func (l *LexerNoViableAltException) String() string {
- symbol := ""
- if l.startIndex >= 0 && l.startIndex < l.input.Size() {
- symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
- }
- return "LexerNoViableAltException" + symbol
-}
-
-type NoViableAltException struct {
- *BaseRecognitionException
-
- startToken Token
- offendingToken Token
- ctx ParserRuleContext
- deadEndConfigs ATNConfigSet
-}
-
-// Indicates that the parser could not decide which of two or more paths
-// to take based upon the remaining input. It tracks the starting token
-// of the offending input and also knows where the parser was
-// in the various paths when the error. Reported by ReportNoViableAlternative()
-func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
-
- if ctx == nil {
- ctx = recognizer.GetParserRuleContext()
- }
-
- if offendingToken == nil {
- offendingToken = recognizer.GetCurrentToken()
- }
-
- if startToken == nil {
- startToken = recognizer.GetCurrentToken()
- }
-
- if input == nil {
- input = recognizer.GetInputStream().(TokenStream)
- }
-
- n := new(NoViableAltException)
- n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
-
- // Which configurations did we try at input.Index() that couldn't Match
- // input.LT(1)?//
- n.deadEndConfigs = deadEndConfigs
- // The token object at the start index the input stream might
- // not be buffering tokens so get a reference to it. (At the
- // time the error occurred, of course the stream needs to keep a
- // buffer all of the tokens but later we might not have access to those.)
- n.startToken = startToken
- n.offendingToken = offendingToken
-
- return n
-}
-
-type InputMisMatchException struct {
- *BaseRecognitionException
-}
-
-// This signifies any kind of mismatched input exceptions such as
-// when the current input does not Match the expected token.
-func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
-
- i := new(InputMisMatchException)
- i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
- i.offendingToken = recognizer.GetCurrentToken()
-
- return i
-
-}
-
-// A semantic predicate failed during validation. Validation of predicates
-// occurs when normally parsing the alternative just like Matching a token.
-// Disambiguating predicate evaluation occurs when we test a predicate during
-// prediction.
-
-type FailedPredicateException struct {
- *BaseRecognitionException
-
- ruleIndex int
- predicateIndex int
- predicate string
-}
-
-func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
-
- f := new(FailedPredicateException)
-
- f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
-
- s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
- trans := s.GetTransitions()[0]
- if trans2, ok := trans.(*PredicateTransition); ok {
- f.ruleIndex = trans2.ruleIndex
- f.predicateIndex = trans2.predIndex
- } else {
- f.ruleIndex = 0
- f.predicateIndex = 0
- }
- f.predicate = predicate
- f.offendingToken = recognizer.GetCurrentToken()
-
- return f
-}
-
-func (f *FailedPredicateException) formatMessage(predicate, message string) string {
- if message != "" {
- return message
- }
-
- return "failed predicate: {" + predicate + "}?"
-}
-
-type ParseCancellationException struct {
-}
-
-func NewParseCancellationException() *ParseCancellationException {
- // Error.call(this)
- // Error.captureStackTrace(this, ParseCancellationException)
- return new(ParseCancellationException)
-}
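errors.go supplies the concrete exception types that `ReportError`, in the file above, dispatches on with a Go type switch. A compact sketch of that dispatch shape using invented stand-in error types:

```go
package main

import "fmt"

type recognitionError interface{ Message() string }

type noViableAlt struct{}
type inputMismatch struct{}

func (noViableAlt) Message() string   { return "no viable alternative" }
func (inputMismatch) Message() string { return "mismatched input" }

// report picks a handler from the dynamic type, with a fallback for
// unknown kinds, like the deleted DefaultErrorStrategy.ReportError.
func report(e recognitionError) {
	switch e.(type) {
	case noViableAlt:
		fmt.Println("report no viable alternative:", e.Message())
	case inputMismatch:
		fmt.Println("report input mismatch:", e.Message())
	default:
		fmt.Println("unknown recognition error type:", e.Message())
	}
}

func main() {
	report(noViableAlt{})
	report(inputMismatch{})
}
```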
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
deleted file mode 100644
index bd6ad5efe..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "io"
- "os"
-)
-
-// This is an InputStream that is loaded from a file all at once
-// when you construct the object.
-
-type FileStream struct {
- *InputStream
-
- filename string
-}
-
-func NewFileStream(fileName string) (*FileStream, error) {
-
- buf := bytes.NewBuffer(nil)
-
- f, err := os.Open(fileName)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- _, err = io.Copy(buf, f)
- if err != nil {
- return nil, err
- }
-
- fs := new(FileStream)
-
- fs.filename = fileName
- s := string(buf.Bytes())
-
- fs.InputStream = NewInputStream(s)
-
- return fs, nil
-
-}
-
-func (f *FileStream) GetSourceName() string {
- return f.filename
-}
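`NewFileStream` slurps the whole file through a `bytes.Buffer` before decoding it into runes. On modern Go the load step can be a single `os.ReadFile` call; a sketch of an equivalent constructor under that assumption (`fileStream` and the sample filename are illustrative, not the runtime's API):

```go
package main

import (
	"fmt"
	"os"
)

// fileStream mirrors the deleted FileStream: the entire file is decoded
// into runes up front, so indexing is by code point, not by byte.
type fileStream struct {
	filename string
	data     []rune
}

func newFileStream(name string) (*fileStream, error) {
	b, err := os.ReadFile(name) // one call replaces Open/Copy/Close
	if err != nil {
		return nil, err
	}
	return &fileStream{filename: name, data: []rune(string(b))}, nil
}

func main() {
	fs, err := newFileStream("grammar.txt") // hypothetical input file
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	fmt.Println(fs.filename, "runes:", len(fs.data))
}
```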
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
deleted file mode 100644
index a8b889ced..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type InputStream struct {
- name string
- index int
- data []rune
- size int
-}
-
-func NewInputStream(data string) *InputStream {
-
- is := new(InputStream)
-
- is.name = ""
- is.index = 0
- is.data = []rune(data)
- is.size = len(is.data) // number of runes
-
- return is
-}
-
-func (is *InputStream) reset() {
- is.index = 0
-}
-
-func (is *InputStream) Consume() {
- if is.index >= is.size {
- // assert is.LA(1) == TokenEOF
- panic("cannot consume EOF")
- }
- is.index++
-}
-
-func (is *InputStream) LA(offset int) int {
-
- if offset == 0 {
- return 0 // nil
- }
- if offset < 0 {
- offset++ // e.g., translate LA(-1) to use offset=0
- }
- pos := is.index + offset - 1
-
- if pos < 0 || pos >= is.size { // invalid
- return TokenEOF
- }
-
- return int(is.data[pos])
-}
-
-func (is *InputStream) LT(offset int) int {
- return is.LA(offset)
-}
-
-func (is *InputStream) Index() int {
- return is.index
-}
-
-func (is *InputStream) Size() int {
- return is.size
-}
-
-// mark/release do nothing we have entire buffer
-func (is *InputStream) Mark() int {
- return -1
-}
-
-func (is *InputStream) Release(marker int) {
-}
-
-func (is *InputStream) Seek(index int) {
- if index <= is.index {
- is.index = index // just jump don't update stream state (line,...)
- return
- }
- // seek forward
- is.index = intMin(index, is.size)
-}
-
-func (is *InputStream) GetText(start int, stop int) string {
- if stop >= is.size {
- stop = is.size - 1
- }
- if start >= is.size {
- return ""
- }
-
- return string(is.data[start : stop+1])
-}
-
-func (is *InputStream) GetTextFromTokens(start, stop Token) string {
- if start != nil && stop != nil {
- return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
- }
-
- return ""
-}
-
-func (is *InputStream) GetTextFromInterval(i *Interval) string {
- return is.GetText(i.Start, i.Stop)
-}
-
-func (*InputStream) GetSourceName() string {
- return "Obtained from string"
-}
-
-func (is *InputStream) String() string {
- return string(is.data)
-}
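`InputStream.LA` above encodes ANTLR's lookahead convention: `LA(1)` is the current rune, `LA(-1)` the one just consumed, offset 0 is undefined, and negative offsets are shifted by one before indexing. A tiny sketch demonstrating those semantics on a toy rune buffer:

```go
package main

import "fmt"

const tokenEOF = -1

type input struct {
	data  []rune
	index int
}

// la mirrors InputStream.LA: 1-based lookahead, negative lookbehind.
func (is *input) la(offset int) int {
	if offset == 0 {
		return 0 // undefined by convention
	}
	if offset < 0 {
		offset++ // LA(-1) addresses the rune just consumed
	}
	pos := is.index + offset - 1
	if pos < 0 || pos >= len(is.data) {
		return tokenEOF
	}
	return int(is.data[pos])
}

func main() {
	is := &input{data: []rune("abc"), index: 1} // 'a' already consumed
	fmt.Println(string(rune(is.la(1))))         // "b": current rune
	fmt.Println(string(rune(is.la(-1))))        // "a": previous rune
	fmt.Println(is.la(3))                       // -1: past the end, so EOF
}
```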
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
deleted file mode 100644
index e5a74f0c6..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package antlr
-
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-import (
- "sort"
-)
-
-// Collectable is an interface that a struct should implement if it is to be
-// usable as a key in these collections.
-type Collectable[T any] interface {
- Hash() int
- Equals(other Collectable[T]) bool
-}
-
-type Comparator[T any] interface {
- Hash1(o T) int
- Equals2(T, T) bool
-}
-
-// JStore implements a container that allows the use of a struct to calculate the key
-// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just
-// serve the needs of the ANTLR Go runtime.
-//
-// For ease of porting the logic of the runtime from the master target (Java), this collection
-// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
-// function as the key. The values are stored in a standard go map which internally is a form of hashmap
-// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
-// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
-// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
-// we understand the requirements, then this is fine - this is not a general purpose collection.
-type JStore[T any, C Comparator[T]] struct {
- store map[int][]T
- len int
- comparator Comparator[T]
-}
-
-func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] {
-
- if comparator == nil {
- panic("comparator cannot be nil")
- }
-
- s := &JStore[T, C]{
- store: make(map[int][]T, 1),
- comparator: comparator,
- }
- return s
-}
-
-// Put will store given value in the collection. Note that the key for storage is generated from
-// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
-// as any kind of general collection.
-//
-// If the key has a hash conflict, then the value will be added to the slice of values associated with the
-// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
-// tested by calling the equals() method on the key.
-//
-// If the given value is already present in the store, then the existing value is returned as v and exists is set to true.
-//
-// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
-func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn
-
- kh := s.comparator.Hash1(value)
-
- for _, v1 := range s.store[kh] {
- if s.comparator.Equals2(value, v1) {
- return v1, true
- }
- }
- s.store[kh] = append(s.store[kh], value)
- s.len++
- return value, false
-}
-
-// Get will return the value associated with the key - the type of the key is the same type as the value
-// which would not generally be useful, but this is a specific thing for ANTLR where the key is
-// generated using the object we are going to store.
-func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn
-
- kh := s.comparator.Hash1(key)
-
- for _, v := range s.store[kh] {
- if s.comparator.Equals2(key, v) {
- return v, true
- }
- }
- return key, false
-}
-
-// Contains returns true if the given key is present in the store
-func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn
-
- _, present := s.Get(key)
- return present
-}
-
-func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
- vs := make([]T, 0, len(s.store))
- for _, v := range s.store {
- vs = append(vs, v...)
- }
- sort.Slice(vs, func(i, j int) bool {
- return less(vs[i], vs[j])
- })
-
- return vs
-}
-
-func (s *JStore[T, C]) Each(f func(T) bool) {
- for _, e := range s.store {
- for _, v := range e {
- f(v)
- }
- }
-}
-
-func (s *JStore[T, C]) Len() int {
- return s.len
-}
-
-func (s *JStore[T, C]) Values() []T {
- vs := make([]T, 0, len(s.store))
- for _, e := range s.store {
- for _, v := range e {
- vs = append(vs, v)
- }
- }
- return vs
-}
-
-type entry[K, V any] struct {
- key K
- val V
-}
-
-type JMap[K, V any, C Comparator[K]] struct {
- store map[int][]*entry[K, V]
- len int
- comparator Comparator[K]
-}
-
-func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] {
- return &JMap[K, V, C]{
- store: make(map[int][]*entry[K, V], 1),
- comparator: comparator,
- }
-}
-
-func (m *JMap[K, V, C]) Put(key K, val V) {
- kh := m.comparator.Hash1(key)
-
- m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
- m.len++
-}
-
-func (m *JMap[K, V, C]) Values() []V {
- vs := make([]V, 0, len(m.store))
- for _, e := range m.store {
- for _, v := range e {
- vs = append(vs, v.val)
- }
- }
- return vs
-}
-
-func (m *JMap[K, V, C]) Get(key K) (V, bool) {
-
- var none V
- kh := m.comparator.Hash1(key)
- for _, e := range m.store[kh] {
- if m.comparator.Equals2(e.key, key) {
- return e.val, true
- }
- }
- return none, false
-}
-
-func (m *JMap[K, V, C]) Len() int {
-	return m.len // number of entries, not the number of hash buckets
-}
-
-func (m *JMap[K, V, C]) Delete(key K) {
- kh := m.comparator.Hash1(key)
- for i, e := range m.store[kh] {
- if m.comparator.Equals2(e.key, key) {
- m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
- m.len--
- return
- }
- }
-}
-
-func (m *JMap[K, V, C]) Clear() {
-	m.store = make(map[int][]*entry[K, V])
-	m.len = 0 // reset the entry count along with the buckets
-}
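
JStore and JMap above both resolve hash collisions with a slice-per-bucket scan driven by the caller-supplied hash and equality functions. A cut-down standalone sketch of the same bucketed-hash technique (bucketStore and all names here are hypothetical, and the hash is deliberately collision-prone for demonstration):

package main

import "fmt"

// bucketStore is a JStore analogue: buckets keyed by a caller-supplied
// hash, scanned linearly with caller-supplied equality.
type bucketStore[T any] struct {
	buckets map[int][]T
	hash    func(T) int
	equals  func(T, T) bool
}

// put returns the stored equal value and true on a hit, otherwise inserts.
func (s *bucketStore[T]) put(v T) (T, bool) {
	h := s.hash(v)
	for _, old := range s.buckets[h] {
		if s.equals(v, old) {
			return old, true
		}
	}
	s.buckets[h] = append(s.buckets[h], v)
	return v, false
}

func main() {
	s := &bucketStore[string]{
		buckets: map[int][]string{},
		hash:    func(v string) int { return len(v) }, // collision-prone on purpose
		equals:  func(a, b string) bool { return a == b },
	}
	s.put("ab")
	_, hit := s.put("cd") // collides with "ab" but is not equal
	fmt.Println(hit)      // false: the bucket scan told them apart
}
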
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
deleted file mode 100644
index 6533f0516..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-// A lexer is a recognizer that draws input symbols from a character stream.
-// Lexer grammars result in a subclass of this object. A Lexer object
-// uses simplified Match() and error-recovery mechanisms in the interest
-// of speed.
-
-type Lexer interface {
- TokenSource
- Recognizer
-
- Emit() Token
-
- SetChannel(int)
- PushMode(int)
- PopMode() int
- SetType(int)
- SetMode(int)
-}
-
-type BaseLexer struct {
- *BaseRecognizer
-
- Interpreter ILexerATNSimulator
- TokenStartCharIndex int
- TokenStartLine int
- TokenStartColumn int
- ActionType int
- Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
-
- input CharStream
- factory TokenFactory
- tokenFactorySourcePair *TokenSourceCharStreamPair
- token Token
- hitEOF bool
- channel int
- thetype int
- modeStack IntStack
- mode int
- text string
-}
-
-func NewBaseLexer(input CharStream) *BaseLexer {
-
- lexer := new(BaseLexer)
-
- lexer.BaseRecognizer = NewBaseRecognizer()
-
- lexer.input = input
- lexer.factory = CommonTokenFactoryDEFAULT
- lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
-
- lexer.Virt = lexer
-
- lexer.Interpreter = nil // child classes must populate it
-
-	// The goal of all lexer rules/methods is to create a token object.
-	// The token is an instance variable because multiple rules may collaborate
-	// to create a single token. NextToken will return this object after
-	// Matching lexer rule(s). If you subclass to allow multiple token
-	// emissions, then set it to the last token to be Matched, or to
-	// something non-nil, so that the auto-emit mechanism will not
-	// emit another token.
- lexer.token = nil
-
- // What character index in the stream did the current token start at?
- // Needed, for example, to get the text for current token. Set at
- // the start of NextToken.
- lexer.TokenStartCharIndex = -1
-
-	// The line on which the first character of the token resides.
-	lexer.TokenStartLine = -1
-
-	// The character position of the first character within the line.
-	lexer.TokenStartColumn = -1
-
-	// Once we see EOF on the char stream, the next token will be EOF.
-	// If you have DONE : EOF, then you see DONE EOF.
-	lexer.hitEOF = false
-
-	// The channel number for the current token.
-	lexer.channel = TokenDefaultChannel
-
-	// The token type for the current token.
-	lexer.thetype = TokenInvalidType
-
- lexer.modeStack = make([]int, 0)
- lexer.mode = LexerDefaultMode
-
-	// You can set the text for the current token to override what is in
-	// the input char buffer: use SetText() or set this instance variable
-	// directly.
- lexer.text = ""
-
- return lexer
-}
-
-const (
- LexerDefaultMode = 0
- LexerMore = -2
- LexerSkip = -3
-)
-
-const (
- LexerDefaultTokenChannel = TokenDefaultChannel
- LexerHidden = TokenHiddenChannel
- LexerMinCharValue = 0x0000
- LexerMaxCharValue = 0x10FFFF
-)
-
-func (b *BaseLexer) reset() {
-	// whack the Lexer state variables
- if b.input != nil {
- b.input.Seek(0) // rewind the input
- }
- b.token = nil
- b.thetype = TokenInvalidType
- b.channel = TokenDefaultChannel
- b.TokenStartCharIndex = -1
- b.TokenStartColumn = -1
- b.TokenStartLine = -1
- b.text = ""
-
- b.hitEOF = false
- b.mode = LexerDefaultMode
- b.modeStack = make([]int, 0)
-
- b.Interpreter.reset()
-}
-
-func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
- return b.Interpreter
-}
-
-func (b *BaseLexer) GetInputStream() CharStream {
- return b.input
-}
-
-func (b *BaseLexer) GetSourceName() string {
- return b.GrammarFileName
-}
-
-func (b *BaseLexer) SetChannel(v int) {
- b.channel = v
-}
-
-func (b *BaseLexer) GetTokenFactory() TokenFactory {
- return b.factory
-}
-
-func (b *BaseLexer) setTokenFactory(f TokenFactory) {
- b.factory = f
-}
-
-func (b *BaseLexer) safeMatch() (ret int) {
- defer func() {
- if e := recover(); e != nil {
- if re, ok := e.(RecognitionException); ok {
- b.notifyListeners(re) // Report error
- b.Recover(re)
- ret = LexerSkip // default
- }
- }
- }()
-
- return b.Interpreter.Match(b.input, b.mode)
-}
-
-// Return a token from this source, i.e., Match a token on the char stream.
-func (b *BaseLexer) NextToken() Token {
- if b.input == nil {
- panic("NextToken requires a non-nil input stream.")
- }
-
- tokenStartMarker := b.input.Mark()
-
- // previously in finally block
- defer func() {
- // make sure we release marker after Match or
- // unbuffered char stream will keep buffering
- b.input.Release(tokenStartMarker)
- }()
-
- for {
- if b.hitEOF {
- b.EmitEOF()
- return b.token
- }
- b.token = nil
- b.channel = TokenDefaultChannel
- b.TokenStartCharIndex = b.input.Index()
- b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
- b.TokenStartLine = b.Interpreter.GetLine()
- b.text = ""
- continueOuter := false
- for {
- b.thetype = TokenInvalidType
- ttype := LexerSkip
-
- ttype = b.safeMatch()
-
- if b.input.LA(1) == TokenEOF {
- b.hitEOF = true
- }
- if b.thetype == TokenInvalidType {
- b.thetype = ttype
- }
- if b.thetype == LexerSkip {
- continueOuter = true
- break
- }
- if b.thetype != LexerMore {
- break
- }
- }
-
- if continueOuter {
- continue
- }
- if b.token == nil {
- b.Virt.Emit()
- }
- return b.token
- }
-}
-
-// Skip instructs the lexer to Skip creating a token for the current lexer
-// rule and to look for another token. NextToken() knows to keep looking when
-// a lexer rule finishes with the token set to SKIPTOKEN. Recall that
-// if token==nil at the end of any token rule, it creates one for you
-// and emits it.
-func (b *BaseLexer) Skip() {
- b.thetype = LexerSkip
-}
-
-func (b *BaseLexer) More() {
- b.thetype = LexerMore
-}
-
-func (b *BaseLexer) SetMode(m int) {
- b.mode = m
-}
-
-func (b *BaseLexer) PushMode(m int) {
- if LexerATNSimulatorDebug {
- fmt.Println("pushMode " + strconv.Itoa(m))
- }
- b.modeStack.Push(b.mode)
- b.mode = m
-}
-
-func (b *BaseLexer) PopMode() int {
- if len(b.modeStack) == 0 {
- panic("Empty Stack")
- }
- if LexerATNSimulatorDebug {
- fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
- }
- i, _ := b.modeStack.Pop()
- b.mode = i
- return b.mode
-}
-
-func (b *BaseLexer) inputStream() CharStream {
- return b.input
-}
-
-// SetInputStream resets the lexer input stream and associated lexer state.
-func (b *BaseLexer) SetInputStream(input CharStream) {
- b.input = nil
- b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
- b.reset()
- b.input = input
- b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
-}
-
-func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
- return b.tokenFactorySourcePair
-}
-
-// EmitToken by default does not support multiple emits per NextToken
-// invocation, for efficiency reasons. Subclass and override this method,
-// NextToken, and GetToken (to push tokens into a list and pull from that
-// list rather than from a single variable, as this implementation does).
-func (b *BaseLexer) EmitToken(token Token) {
- b.token = token
-}
-
-// Emit is the standard method called to automatically emit a token at the
-// outermost lexical rule. The token object should point into the
-// char buffer start..stop. If there is a text override in 'text',
-// use that to set the token's text. Override this method to emit
-// custom Token objects or to provide a new factory.
-func (b *BaseLexer) Emit() Token {
- t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
- b.EmitToken(t)
- return t
-}
-
-func (b *BaseLexer) EmitEOF() Token {
- cpos := b.GetCharPositionInLine()
- lpos := b.GetLine()
- eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
- b.EmitToken(eof)
- return eof
-}
-
-func (b *BaseLexer) GetCharPositionInLine() int {
- return b.Interpreter.GetCharPositionInLine()
-}
-
-func (b *BaseLexer) GetLine() int {
- return b.Interpreter.GetLine()
-}
-
-func (b *BaseLexer) GetType() int {
- return b.thetype
-}
-
-func (b *BaseLexer) SetType(t int) {
- b.thetype = t
-}
-
-// GetCharIndex returns the index of the current character of lookahead.
-func (b *BaseLexer) GetCharIndex() int {
- return b.input.Index()
-}
-
-// GetText returns the text Matched so far for the current token, or any
-// text override. Setting the complete text of this token wipes any previous
-// changes to the text.
-func (b *BaseLexer) GetText() string {
- if b.text != "" {
- return b.text
- }
-
- return b.Interpreter.GetText(b.input)
-}
-
-func (b *BaseLexer) SetText(text string) {
- b.text = text
-}
-
-func (b *BaseLexer) GetATN() *ATN {
- return b.Interpreter.ATN()
-}
-
-// GetAllTokens returns a list of all Token objects in the input char stream.
-// It forces a load of all tokens and does not include the EOF token.
-func (b *BaseLexer) GetAllTokens() []Token {
- vl := b.Virt
- tokens := make([]Token, 0)
- t := vl.NextToken()
- for t.GetTokenType() != TokenEOF {
- tokens = append(tokens, t)
- t = vl.NextToken()
- }
- return tokens
-}
-
-func (b *BaseLexer) notifyListeners(e RecognitionException) {
- start := b.TokenStartCharIndex
- stop := b.input.Index()
- text := b.input.GetTextFromInterval(NewInterval(start, stop))
- msg := "token recognition error at: '" + text + "'"
- listener := b.GetErrorListenerDispatch()
- listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
-}
-
-func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
- if c == TokenEOF {
- return ""
- } else if c == '\n' {
- return "\\n"
- } else if c == '\t' {
- return "\\t"
- } else if c == '\r' {
- return "\\r"
- } else {
- return string(c)
- }
-}
-
-func (b *BaseLexer) getCharErrorDisplay(c rune) string {
- return "'" + b.getErrorDisplayForChar(c) + "'"
-}
-
-// Lexers can normally Match any char in its vocabulary after Matching
-// a token, so do the easy thing and just kill a character and hope
-// it all works out. You can instead use the rule invocation stack
-// to do sophisticated error recovery if you are in a fragment rule.
-func (b *BaseLexer) Recover(re RecognitionException) {
- if b.input.LA(1) != TokenEOF {
- if _, ok := re.(*LexerNoViableAltException); ok {
- // Skip a char and try again
- b.Interpreter.Consume(b.input)
- } else {
- // TODO: Do we lose character or line position information?
- b.input.Consume()
- }
- }
-}
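
NextToken above drives a two-level loop: the inner loop keeps matching while a rule sets the type to LexerMore, and LexerSkip restarts the outer loop without emitting a token. A toy standalone simulation of that control flow (nextTokens and the fake match results are illustrative, not runtime API):

package main

import "fmt"

const (
	lexerMore = -2
	lexerSkip = -3
)

// nextTokens replays a sequence of per-rule match results: lexerMore keeps
// the inner loop matching, lexerSkip suppresses the token, anything else
// is emitted. The sequence is assumed to end with a real token type.
func nextTokens(results []int) []int {
	var emitted []int
	i := 0
	for i < len(results) {
		var ttype int
		for { // inner loop: accumulate while rules ask for more input
			ttype = results[i]
			i++
			if ttype != lexerMore {
				break
			}
		}
		if ttype == lexerSkip {
			continue // no token from this rule; look for another
		}
		emitted = append(emitted, ttype)
	}
	return emitted
}

func main() {
	fmt.Println(nextTokens([]int{lexerMore, lexerMore, 5, lexerSkip, 7})) // [5 7]
}
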
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
deleted file mode 100644
index be1ba7a7e..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import "golang.org/x/exp/slices"
-
-// Represents an executor for a sequence of lexer actions which are traversed
-// during the Matching operation of a lexer rule (token).
-//
-// The executor tracks position information for position-dependent lexer actions
-// efficiently, ensuring that actions appearing only at the end of the rule do
-// not cause bloating of the {@link DFA} created for the lexer.
-
-type LexerActionExecutor struct {
- lexerActions []LexerAction
- cachedHash int
-}
-
-func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
-
- if lexerActions == nil {
- lexerActions = make([]LexerAction, 0)
- }
-
- l := new(LexerActionExecutor)
-
- l.lexerActions = lexerActions
-
- // Caches the result of {@link //hashCode} since the hash code is an element
- // of the performance-critical {@link LexerATNConfig//hashCode} operation.
- l.cachedHash = murmurInit(57)
- for _, a := range lexerActions {
- l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
- }
-
- return l
-}
-
-// Creates a {@link LexerActionExecutor} which executes the actions for
-// the input {@code lexerActionExecutor} followed by a specified
-// {@code lexerAction}.
-//
-// @param lexerActionExecutor The executor for actions already traversed by
-// the lexer while Matching a token within a particular
-// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
-// though it were an empty executor.
-// @param lexerAction The lexer action to execute after the actions
-// specified in {@code lexerActionExecutor}.
-//
-// @return A {@link LexerActionExecutor} for executing the combined actions
-// of {@code lexerActionExecutor} and {@code lexerAction}.
-func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
- if lexerActionExecutor == nil {
- return NewLexerActionExecutor([]LexerAction{lexerAction})
- }
-
- return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
-}
-
-// Creates a {@link LexerActionExecutor} which encodes the current offset
-// for position-dependent lexer actions.
-//
-// Normally, when the executor encounters lexer actions where
-// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
-// {@link IntStream//seek} on the input {@link CharStream} to set the input
-// position to the end of the current token. This behavior provides
-// for efficient DFA representation of lexer actions which appear at the end
-// of a lexer rule, even when the lexer rule Matches a variable number of
-// characters.
-//
-// Prior to traversing a Match transition in the ATN, the current offset
-// from the token start index is assigned to all position-dependent lexer
-// actions which have not already been assigned a fixed offset. By storing
-// the offsets relative to the token start index, the DFA representation of
-// lexer actions which appear in the middle of tokens remains efficient due
-// to sharing among tokens of the same length, regardless of their absolute
-// position in the input stream.
-//
-// If the current executor already has offsets assigned to all
-// position-dependent lexer actions, the method returns {@code this}.
-//
-// @param offset The current offset to assign to all position-dependent
-// lexer actions which do not already have offsets assigned.
-//
-// @return A {@link LexerActionExecutor} which stores input stream offsets
-// for all position-dependent lexer actions.
-// /
-func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
- var updatedLexerActions []LexerAction
- for i := 0; i < len(l.lexerActions); i++ {
- _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
- if l.lexerActions[i].getIsPositionDependent() && !ok {
- if updatedLexerActions == nil {
- updatedLexerActions = make([]LexerAction, 0)
-
- for _, a := range l.lexerActions {
- updatedLexerActions = append(updatedLexerActions, a)
- }
- }
-
- updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
- }
- }
- if updatedLexerActions == nil {
- return l
- }
-
- return NewLexerActionExecutor(updatedLexerActions)
-}
-
-// Execute the actions encapsulated by this executor within the context of a
-// particular {@link Lexer}.
-//
-// This method calls {@link IntStream//seek} to set the position of the
-// {@code input} {@link CharStream} prior to calling
-// {@link LexerAction//execute} on a position-dependent action. Before the
-// method returns, the input position will be restored to the same position
-// it was in when the method was invoked.
-//
-// @param lexer The lexer instance.
-// @param input The input stream which is the source for the current token.
-// When this method is called, the current {@link IntStream//index} for
-// {@code input} should be the start of the following token, i.e. 1
-// character past the end of the current token.
-// @param startIndex The token start index. This value may be passed to
-// {@link IntStream//seek} to set the {@code input} position to the beginning
-// of the token.
-// /
-func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
- requiresSeek := false
- stopIndex := input.Index()
-
- defer func() {
- if requiresSeek {
- input.Seek(stopIndex)
- }
- }()
-
- for i := 0; i < len(l.lexerActions); i++ {
- lexerAction := l.lexerActions[i]
- if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
- offset := la.offset
- input.Seek(startIndex + offset)
- lexerAction = la.lexerAction
- requiresSeek = (startIndex + offset) != stopIndex
- } else if lexerAction.getIsPositionDependent() {
- input.Seek(stopIndex)
- requiresSeek = false
- }
- lexerAction.execute(lexer)
- }
-}
-
-func (l *LexerActionExecutor) Hash() int {
- if l == nil {
- // TODO: Why is this here? l should not be nil
- return 61
- }
-
- // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
- return l.cachedHash
-}
-
-func (l *LexerActionExecutor) Equals(other interface{}) bool {
- if l == other {
- return true
- }
- othert, ok := other.(*LexerActionExecutor)
- if !ok {
- return false
- }
- if othert == nil {
- return false
- }
- if l.cachedHash != othert.cachedHash {
- return false
- }
- if len(l.lexerActions) != len(othert.lexerActions) {
- return false
- }
- return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
- return i.Equals(j)
- })
-}
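
The execute method above temporarily repositions the input for each position-dependent action and restores the stop position with a deferred Seek. A stripped-down sketch of that seek-and-restore pattern (the cursor type and runActions are assumptions for illustration):

package main

import "fmt"

type cursor struct{ pos int }

func (c *cursor) Seek(p int) { c.pos = p }

// runActions mirrors the shape of execute above: each action runs at
// startIndex+offset, and a deferred Seek restores the stop position if the
// last action left the cursor somewhere else.
func runActions(c *cursor, startIndex int, offsets []int) {
	stopIndex := c.pos
	requiresSeek := false
	defer func() {
		if requiresSeek {
			c.Seek(stopIndex)
		}
	}()
	for _, offset := range offsets {
		c.Seek(startIndex + offset)
		requiresSeek = startIndex+offset != stopIndex
		fmt.Println("action executed at", c.pos)
	}
}

func main() {
	c := &cursor{pos: 10}
	runActions(c, 4, []int{0, 3})
	fmt.Println("cursor restored to", c.pos) // cursor restored to 10
}
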
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
deleted file mode 100644
index 76689615a..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-type LL1Analyzer struct {
- atn *ATN
-}
-
-func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
- la := new(LL1Analyzer)
- la.atn = atn
- return la
-}
-
-// LL1AnalyzerHitPred is a special value added to the lookahead sets to
-// indicate that we hit a predicate during analysis when
-// {@code seeThruPreds==false}.
-const (
- LL1AnalyzerHitPred = TokenInvalidType
-)
-
-// *
-// Calculates the SLL(1) expected lookahead set for each outgoing transition
-// of an {@link ATNState}. The returned array has one element for each
-// outgoing transition in {@code s}. If the closure from transition
-// i leads to a semantic predicate before Matching a symbol, the
-// element at index i of the result will be {@code nil}.
-//
-// @param s the ATN state
-// @return the expected symbols for each outgoing transition of {@code s}.
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
- if s == nil {
- return nil
- }
- count := len(s.GetTransitions())
- look := make([]*IntervalSet, count)
- for alt := 0; alt < count; alt++ {
- look[alt] = NewIntervalSet()
- lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
- seeThruPreds := false // fail to get lookahead upon pred
- la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
-		// Wipe out lookahead for this alternative if we found nothing
- // or we had a predicate when we !seeThruPreds
- if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
- look[alt] = nil
- }
- }
- return look
-}
-
-// *
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-//
-// If {@code ctx} is {@code nil} and the end of the rule containing
-// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
-// If {@code ctx} is not {@code nil} and the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx the complete parser context, or {@code nil} if the context
-// should be ignored
-//
-// @return The set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-// /
-func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
- r := NewIntervalSet()
- seeThruPreds := true // ignore preds get all lookahead
- var lookContext PredictionContext
- if ctx != nil {
- lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
- }
- la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true)
- return r
-}
-
-//*
-// Compute set of tokens that can follow {@code s} in the ATN in the
-// specified {@code ctx}.
-//
-// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
-// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
-// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
-// {@code true} and {@code stopState} or the end of the outermost rule is
-// reached, {@link Token//EOF} is added to the result set.
-//
-// @param s the ATN state.
-// @param stopState the ATN state to stop at. This can be a
-// {@link BlockEndState} to detect epsilon paths through a closure.
-// @param ctx The outer context, or {@code nil} if the outer context should
-// not be used.
-// @param look The result lookahead set.
-// @param lookBusy A set used for preventing epsilon closures in the ATN
-// from causing a stack overflow. Outside code should pass
-// {@code NewSet} for this argument.
-// @param calledRuleStack A set used for preventing left recursion in the
-// ATN from causing a stack overflow. Outside code should pass
-// {@code NewBitSet()} for this argument.
-// @param seeThruPreds {@code true} to treat semantic predicates as
-// implicitly {@code true} and "see through" them, otherwise {@code false}
-// to treat semantic predicates as opaque and add {@link //HitPred} to the
-// result if one is encountered.
-// @param addEOF Add {@link Token//EOF} to the result if the end of the
-// outermost context is reached. This parameter has no effect if {@code ctx}
-// is {@code nil}.
-
-func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
-
- returnState := la.atn.states[ctx.getReturnState(i)]
- la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
-
-}
-
-func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
-
- c := NewBaseATNConfig6(s, 0, ctx)
-
- if lookBusy.Contains(c) {
- return
- }
-
- _, present := lookBusy.Put(c)
- if present {
- return
-
- }
- if s == stopState {
- if ctx == nil {
- look.addOne(TokenEpsilon)
- return
- } else if ctx.isEmpty() && addEOF {
- look.addOne(TokenEOF)
- return
- }
- }
-
- _, ok := s.(*RuleStopState)
-
- if ok {
- if ctx == nil {
- look.addOne(TokenEpsilon)
- return
- } else if ctx.isEmpty() && addEOF {
- look.addOne(TokenEOF)
- return
- }
-
- if ctx != BasePredictionContextEMPTY {
- removed := calledRuleStack.contains(s.GetRuleIndex())
- defer func() {
- if removed {
- calledRuleStack.add(s.GetRuleIndex())
- }
- }()
- calledRuleStack.remove(s.GetRuleIndex())
- // run thru all possible stack tops in ctx
- for i := 0; i < ctx.length(); i++ {
- returnState := la.atn.states[ctx.getReturnState(i)]
- la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
- }
- return
- }
- }
-
- n := len(s.GetTransitions())
-
- for i := 0; i < n; i++ {
- t := s.GetTransitions()[i]
-
- if t1, ok := t.(*RuleTransition); ok {
- if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
- continue
- }
-
- newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
- la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
- } else if t2, ok := t.(AbstractPredicateTransition); ok {
- if seeThruPreds {
- la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
- } else {
- look.addOne(LL1AnalyzerHitPred)
- }
- } else if t.getIsEpsilon() {
- la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
- } else if _, ok := t.(*WildcardTransition); ok {
- look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
- } else {
- set := t.getLabel()
- if set != nil {
- if _, ok := t.(*NotSetTransition); ok {
- set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
- }
- look.addSet(set)
- }
- }
- }
-}
-
-func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
-
- newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
-
- defer func() {
- calledRuleStack.remove(t1.getTarget().GetRuleIndex())
- }()
-
- calledRuleStack.add(t1.getTarget().GetRuleIndex())
- la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
-
-}
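
look1 above is essentially an epsilon-closure walk: it follows epsilon and rule transitions, collects terminal labels into the lookahead set, and uses lookBusy to cut off epsilon cycles. A toy version over a hand-built graph (the edge type and lookahead function are illustrative, not the runtime's ATN types):

package main

import "fmt"

type edge struct {
	to      int
	epsilon bool
	token   rune
}

// lookahead walks epsilon edges and collects terminal labels; the seen set
// plays the role lookBusy plays above, cutting off epsilon cycles.
func lookahead(graph map[int][]edge, state int, seen map[int]bool, out map[rune]bool) {
	if seen[state] {
		return
	}
	seen[state] = true
	for _, e := range graph[state] {
		if e.epsilon {
			lookahead(graph, e.to, seen, out)
		} else {
			out[e.token] = true
		}
	}
}

func main() {
	graph := map[int][]edge{
		0: {{to: 1, epsilon: true}, {token: 'a'}},
		1: {{to: 0, epsilon: true}, {token: 'b'}}, // epsilon cycle back to 0
	}
	out := map[rune]bool{}
	lookahead(graph, 0, map[int]bool{}, out)
	fmt.Println(out['a'], out['b']) // true true
}
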
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
deleted file mode 100644
index d26bf0639..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go
+++ /dev/null
@@ -1,708 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
-)
-
-type Parser interface {
- Recognizer
-
- GetInterpreter() *ParserATNSimulator
-
- GetTokenStream() TokenStream
- GetTokenFactory() TokenFactory
- GetParserRuleContext() ParserRuleContext
- SetParserRuleContext(ParserRuleContext)
- Consume() Token
- GetParseListeners() []ParseTreeListener
-
- GetErrorHandler() ErrorStrategy
- SetErrorHandler(ErrorStrategy)
- GetInputStream() IntStream
- GetCurrentToken() Token
- GetExpectedTokens() *IntervalSet
- NotifyErrorListeners(string, Token, RecognitionException)
- IsExpectedToken(int) bool
- GetPrecedence() int
- GetRuleInvocationStack(ParserRuleContext) []string
-}
-
-type BaseParser struct {
- *BaseRecognizer
-
- Interpreter *ParserATNSimulator
- BuildParseTrees bool
-
- input TokenStream
- errHandler ErrorStrategy
- precedenceStack IntStack
- ctx ParserRuleContext
-
- tracer *TraceListener
- parseListeners []ParseTreeListener
- _SyntaxErrors int
-}
-
-// This is all the parsing support code; essentially, most of it is error
-// recovery stuff.
-func NewBaseParser(input TokenStream) *BaseParser {
-
- p := new(BaseParser)
-
- p.BaseRecognizer = NewBaseRecognizer()
-
- // The input stream.
- p.input = nil
- // The error handling strategy for the parser. The default value is a new
- // instance of {@link DefaultErrorStrategy}.
- p.errHandler = NewDefaultErrorStrategy()
- p.precedenceStack = make([]int, 0)
- p.precedenceStack.Push(0)
- // The {@link ParserRuleContext} object for the currently executing rule.
-	// This is always non-nil during the parsing process.
- p.ctx = nil
- // Specifies whether or not the parser should construct a parse tree during
- // the parsing process. The default value is {@code true}.
- p.BuildParseTrees = true
- // When {@link //setTrace}{@code (true)} is called, a reference to the
- // {@link TraceListener} is stored here so it can be easily removed in a
- // later call to {@link //setTrace}{@code (false)}. The listener itself is
-	// implemented as a parser listener, so this field is not directly used by
- // other parser methods.
- p.tracer = nil
- // The list of {@link ParseTreeListener} listeners registered to receive
- // events during the parse.
- p.parseListeners = nil
-	// The number of syntax errors Reported during parsing. This value is
- // incremented each time {@link //NotifyErrorListeners} is called.
- p._SyntaxErrors = 0
- p.SetInputStream(input)
-
- return p
-}
-
-// This field maps from the serialized ATN string to the deserialized
-// {@link ATN} with bypass alternatives.
-//
-// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
-var bypassAltsAtnCache = make(map[string]int)
-
-// reset the parser's state.
-func (p *BaseParser) reset() {
- if p.input != nil {
- p.input.Seek(0)
- }
- p.errHandler.reset(p)
- p.ctx = nil
- p._SyntaxErrors = 0
- p.SetTrace(nil)
- p.precedenceStack = make([]int, 0)
- p.precedenceStack.Push(0)
- if p.Interpreter != nil {
- p.Interpreter.reset()
- }
-}
-
-func (p *BaseParser) GetErrorHandler() ErrorStrategy {
- return p.errHandler
-}
-
-func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
- p.errHandler = e
-}
-
-// Match current input symbol against {@code ttype}. If the symbol type
-// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
-// called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @param ttype the token type to Match
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// {@code ttype} and the error strategy could not recover from the
-// mismatched symbol
-
-func (p *BaseParser) Match(ttype int) Token {
-
- t := p.GetCurrentToken()
-
- if t.GetTokenType() == ttype {
- p.errHandler.ReportMatch(p)
- p.Consume()
- } else {
- t = p.errHandler.RecoverInline(p)
- if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-			// we must have conjured up a new token during single-token
-			// insertion, if it's not the current symbol
- p.ctx.AddErrorNode(t)
- }
- }
-
- return t
-}
-
-// Match current input symbol as a wildcard. If the symbol type Matches
-// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
-// and {@link //consume} are called to complete the Match process.
-//
-// If the symbol type does not Match,
-// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
-// strategy to attempt recovery. If {@link //getBuildParseTree} is
-// {@code true} and the token index of the symbol returned by
-// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
-// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
-//
-// @return the Matched symbol
-// @panics RecognitionException if the current input symbol did not Match
-// a wildcard and the error strategy could not recover from the mismatched
-// symbol
-
-func (p *BaseParser) MatchWildcard() Token {
- t := p.GetCurrentToken()
- if t.GetTokenType() > 0 {
- p.errHandler.ReportMatch(p)
- p.Consume()
- } else {
- t = p.errHandler.RecoverInline(p)
- if p.BuildParseTrees && t.GetTokenIndex() == -1 {
-			// we must have conjured up a new token during single-token
-			// insertion, if it's not the current symbol
- p.ctx.AddErrorNode(t)
- }
- }
- return t
-}
-
-func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
- return p.ctx
-}
-
-func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
- p.ctx = v
-}
-
-func (p *BaseParser) GetParseListeners() []ParseTreeListener {
- if p.parseListeners == nil {
- return make([]ParseTreeListener, 0)
- }
- return p.parseListeners
-}
-
-// Registers {@code listener} to receive events during the parsing process.
-//
-// To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parse. In addition, calls to certain
-// rule entry methods may be omitted.
-//
-// With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.
-//
-//
-// - Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.
-// - Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.
-// - Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.
-//
-//
-// @param listener the listener to add
-//
-// @panics nilPointerException if {@code listener} is {@code nil}
-func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
- if listener == nil {
- panic("listener")
- }
- if p.parseListeners == nil {
- p.parseListeners = make([]ParseTreeListener, 0)
- }
- p.parseListeners = append(p.parseListeners, listener)
-}
-
-// Remove {@code listener} from the list of parse listeners.
-//
-// If {@code listener} is {@code nil} or has not been added as a parse
-// listener, this method does nothing.
-// @param listener the listener to remove
-func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
-
- if p.parseListeners != nil {
-
- idx := -1
- for i, v := range p.parseListeners {
- if v == listener {
- idx = i
- break
- }
- }
-
- if idx == -1 {
- return
- }
-
- // remove the listener from the slice
- p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
-
- if len(p.parseListeners) == 0 {
- p.parseListeners = nil
- }
- }
-}
-
-// Remove all parse listeners.
-func (p *BaseParser) removeParseListeners() {
- p.parseListeners = nil
-}
-
-// Notify any parse listeners of an enter rule event.
-func (p *BaseParser) TriggerEnterRuleEvent() {
- if p.parseListeners != nil {
- ctx := p.ctx
- for _, listener := range p.parseListeners {
- listener.EnterEveryRule(ctx)
- ctx.EnterRule(listener)
- }
- }
-}
-
-// Notify any parse listeners of an exit rule event.
-//
-// @see //addParseListener
-func (p *BaseParser) TriggerExitRuleEvent() {
- if p.parseListeners != nil {
- // reverse order walk of listeners
- ctx := p.ctx
- l := len(p.parseListeners) - 1
-
- for i := range p.parseListeners {
- listener := p.parseListeners[l-i]
- ctx.ExitRule(listener)
- listener.ExitEveryRule(ctx)
- }
- }
-}
-
-func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
- return p.Interpreter
-}
-
-func (p *BaseParser) GetATN() *ATN {
- return p.Interpreter.atn
-}
-
-func (p *BaseParser) GetTokenFactory() TokenFactory {
- return p.input.GetTokenSource().GetTokenFactory()
-}
-
-// Tell our token source and error strategy about a new way to create tokens.
-func (p *BaseParser) setTokenFactory(factory TokenFactory) {
- p.input.GetTokenSource().setTokenFactory(factory)
-}
-
-// The ATN with bypass alternatives is expensive to create so we create it
-// lazily.
-//
-// @panics UnsupportedOperationException if the current parser does not
-// implement the {@link //getSerializedATN()} method.
-func (p *BaseParser) GetATNWithBypassAlts() {
-
- // TODO
- panic("Not implemented!")
-
- // serializedAtn := p.getSerializedATN()
- // if (serializedAtn == nil) {
- // panic("The current parser does not support an ATN with bypass alternatives.")
- // }
- // result := p.bypassAltsAtnCache[serializedAtn]
- // if (result == nil) {
- // deserializationOptions := NewATNDeserializationOptions(nil)
- // deserializationOptions.generateRuleBypassTransitions = true
- // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
- // p.bypassAltsAtnCache[serializedAtn] = result
- // }
- // return result
-}
-
-// The preferred method of getting a tree pattern. For example, here's a
-// sample use:
-//
-//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-//
-
-func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
-
- panic("NewParseTreePatternMatcher not implemented!")
- //
- // if (lexer == nil) {
- // if (p.GetTokenStream() != nil) {
- // tokenSource := p.GetTokenStream().GetTokenSource()
- // if _, ok := tokenSource.(ILexer); ok {
- // lexer = tokenSource
- // }
- // }
- // }
- // if (lexer == nil) {
- // panic("Parser can't discover a lexer to use")
- // }
-
- // m := NewParseTreePatternMatcher(lexer, p)
- // return m.compile(pattern, patternRuleIndex)
-}
-
-func (p *BaseParser) GetInputStream() IntStream {
- return p.GetTokenStream()
-}
-
-func (p *BaseParser) SetInputStream(input TokenStream) {
- p.SetTokenStream(input)
-}
-
-func (p *BaseParser) GetTokenStream() TokenStream {
- return p.input
-}
-
-// SetTokenStream sets the token stream and resets the parser.
-func (p *BaseParser) SetTokenStream(input TokenStream) {
- p.input = nil
- p.reset()
- p.input = input
-}
-
-// Match needs to return the current input symbol, which gets put
-// into the label for the associated token ref e.g., x=ID.
-func (p *BaseParser) GetCurrentToken() Token {
- return p.input.LT(1)
-}
-
-func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
- if offendingToken == nil {
- offendingToken = p.GetCurrentToken()
- }
- p._SyntaxErrors++
- line := offendingToken.GetLine()
- column := offendingToken.GetColumn()
- listener := p.GetErrorListenerDispatch()
- listener.SyntaxError(p, offendingToken, line, column, msg, err)
-}
-
-func (p *BaseParser) Consume() Token {
- o := p.GetCurrentToken()
- if o.GetTokenType() != TokenEOF {
- p.GetInputStream().Consume()
- }
- hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
- if p.BuildParseTrees || hasListener {
- if p.errHandler.InErrorRecoveryMode(p) {
- node := p.ctx.AddErrorNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitErrorNode(node)
- }
- }
-
- } else {
- node := p.ctx.AddTokenNode(o)
- if p.parseListeners != nil {
- for _, l := range p.parseListeners {
- l.VisitTerminal(node)
- }
- }
- }
- // node.invokingState = p.state
- }
-
- return o
-}
-
-func (p *BaseParser) addContextToParseTree() {
- // add current context to parent if we have a parent
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
- }
-}
-
-func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
- p.SetState(state)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.BuildParseTrees {
- p.addContextToParseTree()
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent()
- }
-}
-
-func (p *BaseParser) ExitRule() {
- p.ctx.SetStop(p.input.LT(-1))
- // trigger event on ctx, before it reverts to parent
- if p.parseListeners != nil {
- p.TriggerExitRuleEvent()
- }
- p.SetState(p.ctx.GetInvokingState())
- if p.ctx.GetParent() != nil {
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- } else {
- p.ctx = nil
- }
-}
-
-func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
- localctx.SetAltNumber(altNum)
-	// if we have a new localctx, make sure we replace the existing ctx
-	// that is the previous child of the parse tree
- if p.BuildParseTrees && p.ctx != localctx {
- if p.ctx.GetParent() != nil {
- p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
- p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
- }
- }
- p.ctx = localctx
-}
-
-// Get the precedence level for the top-most precedence rule.
-//
-// @return The precedence level for the top-most precedence rule, or -1 if
-// the parser context is not nested within a precedence rule.
-
-func (p *BaseParser) GetPrecedence() int {
- if len(p.precedenceStack) == 0 {
- return -1
- }
-
- return p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
- p.SetState(state)
- p.precedenceStack.Push(precedence)
- p.ctx = localctx
- p.ctx.SetStart(p.input.LT(1))
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-//
-// Like {@link //EnterRule} but for recursive rules.
-
-func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
- previous := p.ctx
- previous.SetParent(localctx)
- previous.SetInvokingState(state)
- previous.SetStop(p.input.LT(-1))
-
- p.ctx = localctx
- p.ctx.SetStart(previous.GetStart())
- if p.BuildParseTrees {
- p.ctx.AddChild(previous)
- }
- if p.parseListeners != nil {
- p.TriggerEnterRuleEvent() // simulates rule entry for
- // left-recursive rules
- }
-}
-
-func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
- p.precedenceStack.Pop()
- p.ctx.SetStop(p.input.LT(-1))
- retCtx := p.ctx // save current ctx (return value)
- // unroll so ctx is as it was before call to recursive method
- if p.parseListeners != nil {
- for p.ctx != parentCtx {
- p.TriggerExitRuleEvent()
- p.ctx = p.ctx.GetParent().(ParserRuleContext)
- }
- } else {
- p.ctx = parentCtx
- }
- // hook into tree
- retCtx.SetParent(parentCtx)
- if p.BuildParseTrees && parentCtx != nil {
- // add return ctx into invoking rule's tree
- parentCtx.AddChild(retCtx)
- }
-}
-
-func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
- ctx := p.ctx
- for ctx != nil {
- if ctx.GetRuleIndex() == ruleIndex {
- return ctx
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- return nil
-}
-
-func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
- return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
-}
-
-func (p *BaseParser) inContext(context ParserRuleContext) bool {
- // TODO: useful in parser?
- return false
-}
-
-//
-// Checks whether or not {@code symbol} can follow the current state in the
-// ATN. The behavior of this method is equivalent to the following, but is
-// implemented such that the complete context-sensitive follow set does not
-// need to be explicitly constructed.
-//
-//
-// return getExpectedTokens().contains(symbol)
-//
-//
-// @param symbol the symbol type to check
-// @return {@code true} if {@code symbol} can follow the current state in
-// the ATN, otherwise {@code false}.
-
-func (p *BaseParser) IsExpectedToken(symbol int) bool {
- atn := p.Interpreter.atn
- ctx := p.ctx
- s := atn.states[p.state]
- following := atn.NextTokens(s, nil)
- if following.contains(symbol) {
- return true
- }
- if !following.contains(TokenEpsilon) {
- return false
- }
- for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
- invokingState := atn.states[ctx.GetInvokingState()]
- rt := invokingState.GetTransitions()[0]
- following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
- if following.contains(symbol) {
- return true
- }
- ctx = ctx.GetParent().(ParserRuleContext)
- }
- if following.contains(TokenEpsilon) && symbol == TokenEOF {
- return true
- }
-
- return false
-}
-
-// Computes the set of input symbols which could follow the current parser
-// state and context, as given by {@link //GetState} and {@link //GetContext},
-// respectively.
-//
-// @see ATN//getExpectedTokens(int, RuleContext)
-func (p *BaseParser) GetExpectedTokens() *IntervalSet {
- return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
-}
-
-func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
- atn := p.Interpreter.atn
- s := atn.states[p.state]
- return atn.NextTokens(s, nil)
-}
-
-// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.
-func (p *BaseParser) GetRuleIndex(ruleName string) int {
- var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
- if ok {
- return ruleIndex
- }
-
- return -1
-}
-
-// Return List<String> of the rule names in your parser instance
-// leading up to a call to the current rule. You could override if
-// you want more details such as the file/line info of where
-// in the ATN a rule is invoked.
-//
-// This is very useful for error messages.
-
-func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
- if c == nil {
- c = p.ctx
- }
- stack := make([]string, 0)
- for c != nil {
- // compute what follows who invoked us
- ruleIndex := c.GetRuleIndex()
- if ruleIndex < 0 {
- stack = append(stack, "n/a")
- } else {
- stack = append(stack, p.GetRuleNames()[ruleIndex])
- }
-
- vp := c.GetParent()
-
- if vp == nil {
- break
- }
-
- c = vp.(ParserRuleContext)
- }
- return stack
-}
-
-// For debugging and other purposes.
-func (p *BaseParser) GetDFAStrings() string {
- return fmt.Sprint(p.Interpreter.decisionToDFA)
-}
-
-// For debugging and other purposes.
-func (p *BaseParser) DumpDFA() {
- seenOne := false
- for _, dfa := range p.Interpreter.decisionToDFA {
- if dfa.states.Len() > 0 {
- if seenOne {
- fmt.Println()
- }
- fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
- fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
- seenOne = true
- }
- }
-}
-
-func (p *BaseParser) GetSourceName() string {
- return p.GrammarFileName
-}
-
-// During a parse it is sometimes useful to listen in on the rule entry and
-// exit events as well as token Matches. This is for quick-and-dirty debugging.
-func (p *BaseParser) SetTrace(trace *TraceListener) {
- if trace == nil {
- p.RemoveParseListener(p.tracer)
- p.tracer = nil
- } else {
- if p.tracer != nil {
- p.RemoveParseListener(p.tracer)
- }
- p.tracer = NewTraceListener(p)
- p.AddParseListener(p.tracer)
- }
-}
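
EnterRecursionRule above pushes the rule's precedence, and Precpred then admits a recursive continuation only while the candidate precedence is at least the level on top of the stack. A minimal sketch of that check with a plain int slice (precedenceStack and its methods are illustrative names, not runtime API):

package main

import "fmt"

type precedenceStack []int

func (p *precedenceStack) push(v int) { *p = append(*p, v) }
func (p *precedenceStack) pop()       { *p = (*p)[:len(*p)-1] }

// precpred mirrors Precpred above: recurse only while the candidate
// precedence is at least the level active when the rule was entered.
func (p precedenceStack) precpred(precedence int) bool {
	return precedence >= p[len(p)-1]
}

func main() {
	var s precedenceStack
	s.push(0) // outermost context, as in NewBaseParser
	s.push(2) // entered a recursion rule at precedence 2
	fmt.Println(s.precpred(3), s.precpred(1)) // true false
	s.pop() // matches UnrollRecursionContexts popping on the way out
}
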
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
deleted file mode 100644
index 8bcc46a0d..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go
+++ /dev/null
@@ -1,1559 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-var (
- ParserATNSimulatorDebug = false
- ParserATNSimulatorTraceATNSim = false
- ParserATNSimulatorDFADebug = false
- ParserATNSimulatorRetryDebug = false
- TurnOffLRLoopEntryBranchOpt = false
-)
-
-type ParserATNSimulator struct {
- *BaseATNSimulator
-
- parser Parser
- predictionMode int
- input TokenStream
- startIndex int
- dfa *DFA
- mergeCache *DoubleDict
- outerContext ParserRuleContext
-}
-
-func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
-
- p := new(ParserATNSimulator)
-
- p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
-
- p.parser = parser
- p.decisionToDFA = decisionToDFA
-	// SLL, LL, or LL + exact ambiguity detection?
- p.predictionMode = PredictionModeLL
- // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
- p.input = nil
- p.startIndex = 0
- p.outerContext = nil
- p.dfa = nil
-	// Each prediction operation uses a cache for merging prediction contexts.
-	// Don't keep it around, as it wastes huge amounts of memory. DoubleKeyMap
-	// isn't synchronized, but we're OK since two threads shouldn't reuse the
-	// same parser/atnsim object, because it can only handle one input at a time.
-	// This maps graphs a and b to the merged result c: (a,b)→c. We can avoid
-	// the merge if we ever see a and b again. Note that (b,a)→c should
-	// also be examined during cache lookup.
-	//
- p.mergeCache = nil
-
- return p
-}
-
-func (p *ParserATNSimulator) GetPredictionMode() int {
- return p.predictionMode
-}
-
-func (p *ParserATNSimulator) SetPredictionMode(v int) {
- p.predictionMode = v
-}
-
-func (p *ParserATNSimulator) reset() {
-}
-
-func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
- strconv.Itoa(input.LT(1).GetColumn()))
- }
-
- p.input = input
- p.startIndex = input.Index()
- p.outerContext = outerContext
-
- dfa := p.decisionToDFA[decision]
- p.dfa = dfa
- m := input.Mark()
- index := input.Index()
-
- defer func() {
- p.dfa = nil
- p.mergeCache = nil // wack cache after each prediction
- input.Seek(index)
- input.Release(m)
- }()
-
- // Now we are certain to have a specific decision's DFA
- // But, do we still need an initial state?
- var s0 *DFAState
- p.atn.stateMu.RLock()
- if dfa.getPrecedenceDfa() {
- p.atn.edgeMu.RLock()
- // the start state for a precedence DFA depends on the current
- // parser precedence, and is provided by a DFA method.
- s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
- p.atn.edgeMu.RUnlock()
- } else {
- // the start state for a "regular" DFA is just s0
- s0 = dfa.getS0()
- }
- p.atn.stateMu.RUnlock()
-
- if s0 == nil {
- if outerContext == nil {
- outerContext = ParserRuleContextEmpty
- }
- if ParserATNSimulatorDebug {
- fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
- " exec LA(1)==" + p.getLookaheadName(input) +
- ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
- }
- fullCtx := false
- s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
-
- p.atn.stateMu.Lock()
- if dfa.getPrecedenceDfa() {
-			// If this is a precedence DFA, we use applyPrecedenceFilter
- // to convert the computed start state to a precedence start
- // state. We then use DFA.setPrecedenceStartState to set the
- // appropriate start state for the precedence level rather
- // than simply setting DFA.s0.
- //
- dfa.s0.configs = s0Closure
- s0Closure = p.applyPrecedenceFilter(s0Closure)
- s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
- p.atn.edgeMu.Lock()
- dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
- p.atn.edgeMu.Unlock()
- } else {
- s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
- dfa.setS0(s0)
- }
- p.atn.stateMu.Unlock()
- }
-
- alt := p.execATN(dfa, s0, input, index, outerContext)
- if ParserATNSimulatorDebug {
- fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
- }
- return alt
-
-}
-
-// Performs ATN simulation to compute a predicted alternative based
-// upon the remaining input, but also updates the DFA cache to avoid
-// having to traverse the ATN again for the same input sequence.
-
-// There are some key conditions we're looking for after computing a new
-// set of ATN configs (proposed DFA state):
-// if the set is empty, there is no viable alternative for current symbol
-// does the state uniquely predict an alternative?
-// does the state have a conflict that would prevent us from
-// putting it on the work list?
-
-// We also have some key operations to do:
-// add an edge from the previous DFA state to a potentially new DFA state, D,
-// upon current symbol but only if adding to work list, which means in all
-// cases except no viable alternative (and possibly non-greedy decisions?)
-// collecting predicates and adding semantic context to DFA accept states
-// adding rule context to context-sensitive DFA accept states
-// consuming an input symbol
-// Reporting a conflict
-// Reporting an ambiguity
-// Reporting a context sensitivity
-// Reporting insufficient predicates
-
-// cover these cases:
-//
-// dead end
-// single alt
-// single alt + preds
-// conflict
-// conflict + preds
-func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
-
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
- ", DFA state " + s0.String() +
- ", LA(1)==" + p.getLookaheadName(input) +
- " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
- }
-
- previousD := s0
-
- if ParserATNSimulatorDebug {
- fmt.Println("s0 = " + s0.String())
- }
- t := input.LA(1)
- for { // for more work
- D := p.getExistingTargetState(previousD, t)
- if D == nil {
- D = p.computeTargetState(dfa, previousD, t)
- }
- if D == ATNSimulatorError {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for SLL decision. Full LL doesn't dip into outer
- // so don't need special case.
-			// We will get an error no matter what, so delay it until after the
-			// decision for a better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
- input.Seek(startIndex)
- alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
- if alt != ATNInvalidAltNumber {
- return alt
- }
-
- panic(e)
- }
- if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
- // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- conflictingAlts := D.configs.GetConflictingAlts()
- if D.predicates != nil {
- if ParserATNSimulatorDebug {
- fmt.Println("DFA state has preds in DFA sim LL failover")
- }
- conflictIndex := input.Index()
- if conflictIndex != startIndex {
- input.Seek(startIndex)
- }
- conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
- if conflictingAlts.length() == 1 {
- if ParserATNSimulatorDebug {
- fmt.Println("Full LL avoided")
- }
- return conflictingAlts.minValue()
- }
- if conflictIndex != startIndex {
-					// restore the index so that reporting the fallback to full
-					// context occurs with the index at the correct spot
- input.Seek(conflictIndex)
- }
- }
- if ParserATNSimulatorDFADebug {
- fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
- }
- fullCtx := true
- s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
- p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
- alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
- return alt
- }
- if D.isAcceptState {
- if D.predicates == nil {
- return D.prediction
- }
- stopIndex := input.Index()
- input.Seek(startIndex)
- alts := p.evalSemanticContext(D.predicates, outerContext, true)
-
- switch alts.length() {
- case 0:
- panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
- case 1:
- return alts.minValue()
- default:
-				// Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is reported.
- p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
- return alts.minValue()
- }
- }
- previousD = D
-
- if t != TokenEOF {
- input.Consume()
- t = input.LA(1)
- }
- }
-}
-
-// Get an existing target state for an edge in the DFA. If the target state
-// for the edge has not yet been computed or is otherwise not available,
-// this method returns {@code nil}.
-//
-// @param previousD The current DFA state
-// @param t The next input symbol
-// @return The existing target DFA state for the given input symbol
-// {@code t}, or {@code nil} if the target state for this edge is not
-// already cached
-
-func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
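-	// DFA edges are indexed by t+1 so that TokenEOF (-1) lands in slot 0;
-	// t+1 < 0 therefore means t is not a valid input symbol.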
- if t+1 < 0 {
- return nil
- }
-
- p.atn.edgeMu.RLock()
- defer p.atn.edgeMu.RUnlock()
- edges := previousD.getEdges()
- if edges == nil || t+1 >= len(edges) {
- return nil
- }
- return previousD.getIthEdge(t + 1)
-}
-
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
-//
-// @param dfa The DFA
-// @param previousD The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, this method
-// returns {@link //ERROR}.
-
-func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
- reach := p.computeReachSet(previousD.configs, t, false)
-
- if reach == nil {
- p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
- return ATNSimulatorError
- }
-	// create the new target state we'll add to the DFA after it's complete
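-	// The -1 is a placeholder state number; addDFAState assigns the real
-	// number once the state is installed in the DFA.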
- D := NewDFAState(-1, reach)
-
- predictedAlt := p.getUniqueAlt(reach)
-
- if ParserATNSimulatorDebug {
- altSubSets := PredictionModegetConflictingAltSubsets(reach)
- fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
- ", previous=" + previousD.configs.String() +
- ", configs=" + reach.String() +
- ", predict=" + strconv.Itoa(predictedAlt) +
- ", allSubsetsConflict=" +
- fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
- ", conflictingAlts=" + p.getConflictingAlts(reach).String())
- }
- if predictedAlt != ATNInvalidAltNumber {
- // NO CONFLICT, UNIQUELY PREDICTED ALT
- D.isAcceptState = true
- D.configs.SetUniqueAlt(predictedAlt)
- D.setPrediction(predictedAlt)
- } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
- // MORE THAN ONE VIABLE ALTERNATIVE
- D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
- D.requiresFullContext = true
-		// in SLL-only mode, we will stop at this state and return the minimum alt
- D.isAcceptState = true
- D.setPrediction(D.configs.GetConflictingAlts().minValue())
- }
- if D.isAcceptState && D.configs.HasSemanticContext() {
- p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
- if D.predicates != nil {
- D.setPrediction(ATNInvalidAltNumber)
- }
- }
- // all adds to dfa are done after we've created full D state
- D = p.addDFAEdge(dfa, previousD, t, D)
- return D
-}
-
-func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
- // We need to test all predicates, even in DFA states that
- // uniquely predict alternative.
- nalts := len(decisionState.GetTransitions())
- // Update DFA so reach becomes accept state with (predicate,alt)
- // pairs if preds found for conflicting alts
- altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
- altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
- if altToPred != nil {
- dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
- dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
- } else {
- // There are preds in configs but they might go away
- // when OR'd together like {p}? || NONE == NONE. If neither
- // alt has preds, resolve to min alt
- dfaState.setPrediction(altsToCollectPredsFrom.minValue())
- }
-}
-
-// comes back with reach.uniqueAlt set to a valid alt
-func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
-
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("execATNWithFullContext " + s0.String())
- }
-
- fullCtx := true
- foundExactAmbig := false
- var reach ATNConfigSet
- previous := s0
- input.Seek(startIndex)
- t := input.LA(1)
- predictedAlt := -1
-
- for { // for more work
- reach = p.computeReachSet(previous, t, fullCtx)
- if reach == nil {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for LL decision. Full LL doesn't dip into outer
- // so don't need special case.
-			// We will get an error no matter what, so delay it until after the
-			// decision for a better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- e := p.noViableAlt(input, outerContext, previous, startIndex)
- input.Seek(startIndex)
- alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
- if alt != ATNInvalidAltNumber {
- return alt
- }
-
- panic(e)
- }
- altSubSets := PredictionModegetConflictingAltSubsets(reach)
- if ParserATNSimulatorDebug {
- fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
- strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
- fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
- }
- reach.SetUniqueAlt(p.getUniqueAlt(reach))
- // unique prediction?
- if reach.GetUniqueAlt() != ATNInvalidAltNumber {
- predictedAlt = reach.GetUniqueAlt()
- break
- }
- if p.predictionMode != PredictionModeLLExactAmbigDetection {
- predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
- if predictedAlt != ATNInvalidAltNumber {
- break
- }
- } else {
-			// In exact ambiguity mode, we never try to terminate early.
-			// Just keep scarfing until we know what the conflict is
- if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
- foundExactAmbig = true
- predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
- break
- }
- // else there are multiple non-conflicting subsets or
- // we're not sure what the ambiguity is yet.
- // So, keep going.
- }
- previous = reach
- if t != TokenEOF {
- input.Consume()
- t = input.LA(1)
- }
- }
- // If the configuration set uniquely predicts an alternative,
- // without conflict, then we know that it's a full LL decision
- // not SLL.
- if reach.GetUniqueAlt() != ATNInvalidAltNumber {
- p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
- return predictedAlt
- }
- // We do not check predicates here because we have checked them
- // on-the-fly when doing full context prediction.
-
- //
- // In non-exact ambiguity detection mode, we might actually be able to
- // detect an exact ambiguity, but I'm not going to spend the cycles
- // needed to check. We only emit ambiguity warnings in exact ambiguity
- // mode.
- //
- // For example, we might know that we have conflicting configurations.
- // But, that does not mean that there is no way forward without a
- // conflict. It's possible to have nonconflicting alt subsets as in:
-
- // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
-
- // from
- //
- // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
- // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
- //
-	// In this case, (17,1,[5 $]) indicates there is some next sequence that
-	// would resolve this without conflict to alternative 1. Any other viable
- // next sequence, however, is associated with a conflict. We stop
- // looking for input because no amount of further lookahead will alter
- // the fact that we should predict alternative 1. We just can't say for
- // sure that there is an ambiguity without looking further.
-
- p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
-
- return predictedAlt
-}
-
-func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
- if p.mergeCache == nil {
- p.mergeCache = NewDoubleDict()
- }
- intermediate := NewBaseATNConfigSet(fullCtx)
-
- // Configurations already in a rule stop state indicate reaching the end
- // of the decision rule (local context) or end of the start rule (full
- // context). Once reached, these configurations are never updated by a
- // closure operation, so they are handled separately for the performance
- // advantage of having a smaller intermediate set when calling closure.
- //
- // For full-context reach operations, separate handling is required to
-	// ensure that the alternative matching the longest overall sequence is
-	// chosen when multiple such configurations can match the input.
-
- var skippedStopStates []*BaseATNConfig
-
- // First figure out where we can reach on input t
- for _, c := range closure.GetItems() {
- if ParserATNSimulatorDebug {
- fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
- }
-
- if _, ok := c.GetState().(*RuleStopState); ok {
- if fullCtx || t == TokenEOF {
- skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
- if ParserATNSimulatorDebug {
- fmt.Println("added " + c.String() + " to SkippedStopStates")
- }
- }
- continue
- }
-
- for _, trans := range c.GetState().GetTransitions() {
- target := p.getReachableTarget(trans, t)
- if target != nil {
- cfg := NewBaseATNConfig4(c, target)
- intermediate.Add(cfg, p.mergeCache)
- if ParserATNSimulatorDebug {
- fmt.Println("added " + cfg.String() + " to intermediate")
- }
- }
- }
- }
-
- // Now figure out where the reach operation can take us...
- var reach ATNConfigSet
-
- // This block optimizes the reach operation for intermediate sets which
- // trivially indicate a termination state for the overall
- // AdaptivePredict operation.
- //
- // The conditions assume that intermediate
-	// contains all configurations relevant to the reach set, but this
- // condition is not true when one or more configurations have been
- // withheld in SkippedStopStates, or when the current symbol is EOF.
- //
- if skippedStopStates == nil && t != TokenEOF {
- if len(intermediate.configs) == 1 {
-			// Don't pursue the closure if there is just one state.
-			// It can only have one alternative; just add it to the result.
- reach = intermediate
- } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
-			// Also don't pursue the closure if there is a unique alternative
- // among the configurations.
- reach = intermediate
- }
- }
- // If the reach set could not be trivially determined, perform a closure
- // operation on the intermediate set to compute its initial value.
- //
- if reach == nil {
- reach = NewBaseATNConfigSet(fullCtx)
- closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst)
- treatEOFAsEpsilon := t == TokenEOF
- amount := len(intermediate.configs)
- for k := 0; k < amount; k++ {
- p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
- }
- }
- if t == TokenEOF {
- // After consuming EOF no additional input is possible, so we are
- // only interested in configurations which reached the end of the
- // decision rule (local context) or end of the start rule (full
- // context). Update reach to contain only these configurations. This
- // handles both explicit EOF transitions in the grammar and implicit
- // EOF transitions following the end of the decision or start rule.
- //
- // When reach==intermediate, no closure operation was performed. In
-		// this case, removeAllConfigsNotInRuleStopState needs to check for
- // reachable rule stop states as well as configurations already in
- // a rule stop state.
- //
- // This is handled before the configurations in SkippedStopStates,
- // because any configurations potentially added from that list are
-		// already guaranteed to meet this condition whether or not it's
- // required.
- //
- reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
- }
- // If SkippedStopStates!=nil, then it contains at least one
- // configuration. For full-context reach operations, these
- // configurations reached the end of the start rule, in which case we
- // only add them back to reach if no configuration during the current
- // closure operation reached such a state. This ensures AdaptivePredict
-	// chooses an alternative matching the longest overall sequence when
- // multiple alternatives are viable.
- //
- if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
- for l := 0; l < len(skippedStopStates); l++ {
- reach.Add(skippedStopStates[l], p.mergeCache)
- }
- }
-
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
- }
-
- if len(reach.GetItems()) == 0 {
- return nil
- }
-
- return reach
-}
-
-// Return a configuration set containing only the configurations from
-// {@code configs} which are in a {@link RuleStopState}. If all
-// configurations in {@code configs} are already in a rule stop state, this
-// method simply returns {@code configs}.
-//
-// When {@code lookToEndOfRule} is true, this method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
-//
-// @param configs the configuration set to update
-// @param lookToEndOfRule when true, this method checks for rule stop states
-// reachable by epsilon-only transitions from each configuration in
-// {@code configs}.
-//
-// @return {@code configs} if all configurations in {@code configs} are in a
-// rule stop state, otherwise return a new configuration set containing only
-// the configurations from {@code configs} which are in a rule stop state
-func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return configs
- }
- result := NewBaseATNConfigSet(configs.FullContext())
- for _, config := range configs.GetItems() {
- if _, ok := config.GetState().(*RuleStopState); ok {
- result.Add(config, p.mergeCache)
- continue
- }
- if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
- NextTokens := p.atn.NextTokens(config.GetState(), nil)
- if NextTokens.contains(TokenEpsilon) {
- endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
- result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
- }
- }
- }
- return result
-}
-
-func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
- // always at least the implicit call to start rule
- initialContext := predictionContextFromRuleContext(p.atn, ctx)
- configs := NewBaseATNConfigSet(fullCtx)
- if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim {
- fmt.Println("computeStartState from ATN state " + a.String() +
- " initialContext=" + initialContext.String())
- }
-
- for i := 0; i < len(a.GetTransitions()); i++ {
- target := a.GetTransitions()[i].getTarget()
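-		// Alternatives are numbered from 1, so transition index i maps to alt i+1.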
- c := NewBaseATNConfig6(target, i+1, initialContext)
- closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst)
- p.closure(c, configs, closureBusy, true, fullCtx, false)
- }
- return configs
-}
-
-// This method transforms the start state computed by
-// {@link //computeStartState} to the special start state used by a
-// precedence DFA for a particular precedence value. The transformation
-// process applies the following changes to the start state's configuration
-// set.
-//
-//
-// - Evaluate the precedence predicates for each configuration using
-// {@link SemanticContext//evalPrecedence}.
-// - Remove all configurations which predict an alternative greater than
-// 1, for which another configuration that predicts alternative 1 is in the
-// same ATN state with the same prediction context. This transformation is
-// valid for the following reasons:
-//
-// - The closure block cannot contain any epsilon transitions which bypass
-// the body of the closure, so all states reachable via alternative 1 are
-// part of the precedence alternatives of the transformed left-recursive
-// rule.
-// - The "primary" portion of a left recursive rule cannot contain an
-// epsilon transition, so the only way an alternative other than 1 can exist
-// in a state that is also reachable via alternative 1 is by nesting calls
-// to the left-recursive rule, with the outer calls not being at the
-// preferred precedence level.
-//
-//
-//
-//
-//
-// The prediction context must be considered by this filter to address
-// situations like the following.
-//
-//
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//
-//
-//
-// In the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement})
-// from being eliminated by the filter.
-//
-//
-// @param configs The configuration set computed by
-// {@link //computeStartState} as the start state for the DFA.
-// @return The transformed configuration set representing the start state
-// for a precedence DFA at a particular precedence level (determined by
-// calling {@link Parser//getPrecedence}).
-func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
-
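-	// statesFromAlt1 records, per ATN state number, the prediction context
-	// reached via alternative 1; the second pass uses it to drop alt>1
-	// configurations that duplicate an alt-1 configuration.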
- statesFromAlt1 := make(map[int]PredictionContext)
- configSet := NewBaseATNConfigSet(configs.FullContext())
-
- for _, config := range configs.GetItems() {
- // handle alt 1 first
- if config.GetAlt() != 1 {
- continue
- }
- updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
- if updatedContext == nil {
- // the configuration was eliminated
- continue
- }
- statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
- if updatedContext != config.GetSemanticContext() {
- configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
- } else {
- configSet.Add(config, p.mergeCache)
- }
- }
- for _, config := range configs.GetItems() {
-
- if config.GetAlt() == 1 {
- // already handled
- continue
- }
-		// In the future, this elimination step could be updated to also
- // filter the prediction context for alternatives predicting alt>1
- // (basically a graph subtraction algorithm).
- if !config.getPrecedenceFilterSuppressed() {
- context := statesFromAlt1[config.GetState().GetStateNumber()]
- if context != nil && context.Equals(config.GetContext()) {
- // eliminated
- continue
- }
- }
- configSet.Add(config, p.mergeCache)
- }
- return configSet
-}
-
-func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
- if trans.Matches(ttype, 0, p.atn.maxTokenType) {
- return trans.getTarget()
- }
-
- return nil
-}
-
-func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
-
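-	// altToPred is indexed by alternative number; slot 0 is unused because
-	// alternatives are 1-based.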
- altToPred := make([]SemanticContext, nalts+1)
- for _, c := range configs.GetItems() {
- if ambigAlts.contains(c.GetAlt()) {
- altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
- }
- }
- nPredAlts := 0
- for i := 1; i <= nalts; i++ {
- pred := altToPred[i]
- if pred == nil {
- altToPred[i] = SemanticContextNone
- } else if pred != SemanticContextNone {
- nPredAlts++
- }
- }
- // nonambig alts are nil in altToPred
- if nPredAlts == 0 {
- altToPred = nil
- }
- if ParserATNSimulatorDebug {
- fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
- }
- return altToPred
-}
-
-func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
- pairs := make([]*PredPrediction, 0)
- containsPredicate := false
- for i := 1; i < len(altToPred); i++ {
- pred := altToPred[i]
- // unpredicated is indicated by SemanticContextNONE
- if ambigAlts != nil && ambigAlts.contains(i) {
- pairs = append(pairs, NewPredPrediction(pred, i))
- }
- if pred != SemanticContextNone {
- containsPredicate = true
- }
- }
- if !containsPredicate {
- return nil
- }
- return pairs
-}
-
-// This method is used to improve the localization of error messages by
-// choosing an alternative rather than panicking with a
-// {@link NoViableAltException} in particular prediction scenarios where the
-// {@link //ERROR} state was reached during ATN simulation.
-//
-//
-// The default implementation of this method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be reported later at a more
-// localized location.
-//
-//
-// - If a syntactically valid path or paths reach the end of the decision rule and
-// they are semantically valid if predicated, return the min associated alt.
-// - Else, if one or more semantically invalid but syntactically valid
-// paths exist, return the minimum associated alt.
-//
-// - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
-//
-//
-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, this could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// {@link //AdaptivePredict} instead of panicking with a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
-//
-//
-// @param configs The ATN configurations which were valid immediately before
-// the {@link //ERROR} state was reached
-// @param outerContext The \gamma_0 initial parser context from the paper,
-// or the parser stack at the instant before prediction commences.
-//
-// @return The value to return from {@link //AdaptivePredict}, or
-// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
-// identified and {@link //AdaptivePredict} should Report an error instead.
-func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
- cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
- semValidConfigs := cfgs[0]
- semInvalidConfigs := cfgs[1]
- alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
- if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
- return alt
- }
- // Is there a syntactically valid path with a failed pred?
- if len(semInvalidConfigs.GetItems()) > 0 {
- alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
- if alt != ATNInvalidAltNumber { // syntactically viable path exists
- return alt
- }
- }
- return ATNInvalidAltNumber
-}
-
-func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
- alts := NewIntervalSet()
-
- for _, c := range configs.GetItems() {
- _, ok := c.GetState().(*RuleStopState)
-
- if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
- alts.addOne(c.GetAlt())
- }
- }
- if alts.length() == 0 {
- return ATNInvalidAltNumber
- }
-
- return alts.first()
-}
-
-// Walk the list of configurations and split them according to
-// those that have preds evaluating to true/false. If no pred, assume
-// true pred and include in succeeded set. Returns Pair of sets.
-//
-// Create a new set so as not to alter the incoming parameter.
-//
-// Assumption: the input stream has been restored to the starting point of
-// prediction, which is where predicates need to evaluate.
-
-type ATNConfigSetPair struct {
- item0, item1 ATNConfigSet
-}
-
-func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
- succeeded := NewBaseATNConfigSet(configs.FullContext())
- failed := NewBaseATNConfigSet(configs.FullContext())
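-	// Note: Add is called with a nil merge cache below; presumably that is
-	// acceptable because these sets are transient and used only for
-	// predicate evaluation.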
-
- for _, c := range configs.GetItems() {
- if c.GetSemanticContext() != SemanticContextNone {
- predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
- if predicateEvaluationResult {
- succeeded.Add(c, nil)
- } else {
- failed.Add(c, nil)
- }
- } else {
- succeeded.Add(c, nil)
- }
- }
- return []ATNConfigSet{succeeded, failed}
-}
-
-// Look through a list of predicate/alt pairs, returning alts for the
-// pairs that win. A {@code NONE} predicate indicates an alt containing an
-// unpredicated config which behaves as "always true." If !complete
-// then we stop at the first predicate that evaluates to true. This
-// includes pairs with nil predicates.
-func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
- predictions := NewBitSet()
- for i := 0; i < len(predPredictions); i++ {
- pair := predPredictions[i]
- if pair.pred == SemanticContextNone {
- predictions.add(pair.alt)
- if !complete {
- break
- }
- continue
- }
-
- predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
- if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
- fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
- }
- if predicateEvaluationResult {
- if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
- fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
- }
- predictions.add(pair.alt)
- if !complete {
- break
- }
- }
- }
- return predictions
-}
-
-func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
- initialDepth := 0
- p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
- fullCtx, initialDepth, treatEOFAsEpsilon)
-}
-
-func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("closure(" + config.String() + ")")
- //fmt.Println("configs(" + configs.String() + ")")
- if config.GetReachesIntoOuterContext() > 50 {
- panic("problem")
- }
- }
-
- if _, ok := config.GetState().(*RuleStopState); ok {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if !config.GetContext().isEmpty() {
- for i := 0; i < config.GetContext().length(); i++ {
- if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
- if fullCtx {
- configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
- continue
- } else {
- // we have no context info, just chase follow links (if greedy)
- if ParserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
- }
- p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
- }
- continue
- }
- returnState := p.atn.states[config.GetContext().getReturnState(i)]
- newContext := config.GetContext().GetParent(i) // "pop" return state
-
- c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
- // While we have context to pop back from, we may have
-				// gotten that context AFTER having fallen off a rule.
- // Make sure we track that we are now out of context.
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
- p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
- }
- return
- } else if fullCtx {
- // reached end of start rule
- configs.Add(config, p.mergeCache)
- return
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- if ParserATNSimulatorDebug {
- fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
- }
- }
- }
- p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
-}
-
-// Do the actual work of walking epsilon edges//
-func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
- state := config.GetState()
- // optimization
- if !state.GetEpsilonOnlyTransitions() {
- configs.Add(config, p.mergeCache)
- // make sure to not return here, because EOF transitions can act as
- // both epsilon transitions and non-epsilon transitions.
- }
- for i := 0; i < len(state.GetTransitions()); i++ {
- if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
- continue
- }
-
- t := state.GetTransitions()[i]
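-		// Predicates behind an action transition are not collected: actions are
-		// not executed during prediction, so such predicates cannot be safely
-		// evaluated here.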
- _, ok := t.(*ActionTransition)
- continueCollecting := collectPredicates && !ok
- c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
- if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
- newDepth := depth
-
- if _, ok := config.GetState().(*RuleStopState); ok {
-				// target fell off the end of a rule; mark the resulting c as having dipped into outer context
- // We can't get here if incoming config was rule stop and we had context
- // track how far we dip into outer context. Might
- // come in handy and we avoid evaluating context dependent
-				// preds if this is > 0.
-
- if p.dfa != nil && p.dfa.getPrecedenceDfa() {
- if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
- c.setPrecedenceFilterSuppressed(true)
- }
- }
-
- c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
-
- _, present := closureBusy.Put(c)
- if present {
- // avoid infinite recursion for right-recursive rules
- continue
- }
-
-				configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of this method
- newDepth--
- if ParserATNSimulatorDebug {
- fmt.Println("dips into outer ctx: " + c.String())
- }
- } else {
-
- if !t.getIsEpsilon() {
- _, present := closureBusy.Put(c)
- if present {
- // avoid infinite recursion for EOF* and EOF+
- continue
- }
- }
- if _, ok := t.(*RuleTransition); ok {
- // latch when newDepth goes negative - once we step out of the entry context we can't return
- if newDepth >= 0 {
- newDepth++
- }
- }
- }
- p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
- }
- }
-}
-
-func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
- if TurnOffLRLoopEntryBranchOpt {
- return false
- }
-
- _p := config.GetState()
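-	// _p mirrors the 'p' (current ATN state) of the reference runtime; the
-	// underscore avoids clashing with the method receiver p.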
-
- // First check to see if we are in StarLoopEntryState generated during
- // left-recursion elimination. For efficiency, also check if
- // the context has an empty stack case. If so, it would mean
- // global FOLLOW so we can't perform optimization
- if _p.GetStateType() != ATNStateStarLoopEntry {
- return false
- }
- startLoop, ok := _p.(*StarLoopEntryState)
- if !ok {
- return false
- }
- if !startLoop.precedenceRuleDecision ||
- config.GetContext().isEmpty() ||
- config.GetContext().hasEmptyPath() {
- return false
- }
-
- // Require all return states to return back to the same rule
-	// that _p is in.
- numCtxs := config.GetContext().length()
- for i := 0; i < numCtxs; i++ {
- returnState := p.atn.states[config.GetContext().getReturnState(i)]
- if returnState.GetRuleIndex() != _p.GetRuleIndex() {
- return false
- }
- }
- x := _p.GetTransitions()[0].getTarget()
- decisionStartState := x.(BlockStartState)
- blockEndStateNum := decisionStartState.getEndState().stateNumber
- blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
-
- // Verify that the top of each stack context leads to loop entry/exit
- // state through epsilon edges and w/o leaving rule.
-
- for i := 0; i < numCtxs; i++ { // for each stack context
- returnStateNumber := config.GetContext().getReturnState(i)
- returnState := p.atn.states[returnStateNumber]
-
- // all states must have single outgoing epsilon edge
- if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
- return false
- }
-
- // Look for prefix op case like 'not expr', (' type ')' expr
- returnStateTarget := returnState.GetTransitions()[0].getTarget()
- if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
- continue
- }
-
- // Look for 'expr op expr' or case where expr's return state is block end
- // of (...)* internal block; the block end points to loop back
-		// which points to _p but we don't need to check that
- if returnState == blockEndState {
- continue
- }
-
- // Look for ternary expr ? expr : expr. The return state points at block end,
- // which points at loop entry state
- if returnStateTarget == blockEndState {
- continue
- }
-
- // Look for complex prefix 'between expr and expr' case where 2nd expr's
- // return state points at block end state of (...)* internal block
- if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
- len(returnStateTarget.GetTransitions()) == 1 &&
- returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
- returnStateTarget.GetTransitions()[0].getTarget() == _p {
- continue
- }
-
- // anything else ain't conforming
- return false
- }
-
- return true
-}
-
-func (p *ParserATNSimulator) getRuleName(index int) string {
- if p.parser != nil && index >= 0 {
- return p.parser.GetRuleNames()[index]
- }
- var sb strings.Builder
- sb.Grow(32)
-
-	sb.WriteString("<rule ")
-	sb.WriteString(strconv.Itoa(index))
-	sb.WriteString(">")
- return sb.String()
-}
-
-func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
-
- switch t.getSerializationType() {
- case TransitionRULE:
- return p.ruleTransition(config, t.(*RuleTransition))
- case TransitionPRECEDENCE:
- return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
- case TransitionPREDICATE:
- return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
- case TransitionACTION:
- return p.actionTransition(config, t.(*ActionTransition))
- case TransitionEPSILON:
- return NewBaseATNConfig4(config, t.getTarget())
- case TransitionATOM, TransitionRANGE, TransitionSET:
- // EOF transitions act like epsilon transitions after the first EOF
- // transition is traversed
- if treatEOFAsEpsilon {
- if t.Matches(TokenEOF, 0, 1) {
- return NewBaseATNConfig4(config, t.getTarget())
- }
- }
- return nil
- default:
- return nil
- }
-}
-
-func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
- if ParserATNSimulatorDebug {
- fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
- }
- return NewBaseATNConfig4(config, t.getTarget())
-}
-
-func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
- pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
-
- if ParserATNSimulatorDebug {
- fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
- strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
- if p.parser != nil {
- fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
- }
- }
- var c *BaseATNConfig
- if collectPredicates && inContext {
- if fullCtx {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- currentPosition := p.input.Index()
- p.input.Seek(p.startIndex)
- predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
- p.input.Seek(currentPosition)
- if predSucceeds {
- c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
- }
- } else {
- newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
- }
- } else {
- c = NewBaseATNConfig4(config, pt.getTarget())
- }
- if ParserATNSimulatorDebug {
- fmt.Println("config from pred transition=" + c.String())
- }
- return c
-}
-
-func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
-
- if ParserATNSimulatorDebug {
- fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
- ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
- if p.parser != nil {
- fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
- }
- }
- var c *BaseATNConfig
- if collectPredicates && (!pt.isCtxDependent || inContext) {
- if fullCtx {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- currentPosition := p.input.Index()
- p.input.Seek(p.startIndex)
- predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
- p.input.Seek(currentPosition)
- if predSucceeds {
- c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
- }
- } else {
- newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
- c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
- }
- } else {
- c = NewBaseATNConfig4(config, pt.getTarget())
- }
- if ParserATNSimulatorDebug {
- fmt.Println("config from pred transition=" + c.String())
- }
- return c
-}
-
-func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
- if ParserATNSimulatorDebug {
- fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
- }
- returnState := t.followState
- newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
- return NewBaseATNConfig1(config, t.getTarget(), newContext)
-}
-
-func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModeGetAlts(altsets)
-}
-
-// Sam pointed out a problem with the previous definition, v3, of
-// ambiguous states. If we have another state associated with conflicting
-// alternatives, we should keep going. For example, the following grammar
-//
-// s : (ID | ID ID?) ';'
-//
-// When the ATN simulation reaches the state before ';', it has a DFA
-// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
-// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
-// because alternative two has another way to continue, via [6|2|[]].
-// The key is that we have a single state that has config's only associated
-// with a single alternative, 2, and crucially the state transitions
-// among the configurations are all non-epsilon transitions. That means
-// we don't consider any conflicts that include alternative 2. So, we
-// ignore the conflict between alts 1 and 2. We ignore a set of
-// conflicting alts when there is an intersection with an alternative
-// associated with a single alt state in the state→config-list map.
-//
-// It's also the case that we might have two conflicting configurations but
-// also a 3rd nonconflicting configuration for a different alternative:
-// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
-//
-// a : A | A | A B
-//
-// After matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not
-// stop working on this state. In the previous example, we're concerned
-// with states associated with the conflicting alternatives. Here alt
-// 3 is not associated with the conflicting configs, but since we can continue
-// looking for input reasonably, I don't declare the state done. We
-// ignore a set of conflicting alts when we have an alternative
-// that we still need to pursue.
-//
-
-func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
- var conflictingAlts *BitSet
- if configs.GetUniqueAlt() != ATNInvalidAltNumber {
- conflictingAlts = NewBitSet()
- conflictingAlts.add(configs.GetUniqueAlt())
- } else {
- conflictingAlts = configs.GetConflictingAlts()
- }
- return conflictingAlts
-}
-
-func (p *ParserATNSimulator) GetTokenName(t int) string {
- if t == TokenEOF {
- return "EOF"
- }
-
- if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
- return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
-
-	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
- return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
- }
-
- return strconv.Itoa(t)
-}
-
-func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
- return p.GetTokenName(input.LA(1))
-}
-
-// Used for debugging in AdaptivePredict around execATN, but I cut
-// it out for clarity now that the algorithm works well. We can leave this
-// "dead" code for a bit.
-func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
-
- panic("Not implemented")
-
- // fmt.Println("dead end configs: ")
- // var decs = nvae.deadEndConfigs
- //
-	// for i := 0; i < len(decs); i++ {
-	//    c := decs[i]
-	//    var trans = "no edges"
-	//    if len(c.state.GetTransitions()) > 0 {
- // var t = c.state.GetTransitions()[0]
- // if t2, ok := t.(*AtomTransition); ok {
- // trans = "Atom "+ p.GetTokenName(t2.label)
- // } else if t3, ok := t.(SetTransition); ok {
- // _, ok := t.(*NotSetTransition)
- //
- // var s string
- // if (ok){
- // s = "~"
- // }
- //
- // trans = s + "Set " + t3.set
- // }
- // }
- // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
- // }
-}
-
-func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
- return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
-}
-
-func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
- alt := ATNInvalidAltNumber
- for _, c := range configs.GetItems() {
- if alt == ATNInvalidAltNumber {
- alt = c.GetAlt() // found first alt
- } else if c.GetAlt() != alt {
- return ATNInvalidAltNumber
- }
- }
- return alt
-}
-
-// Add an edge to the DFA, if possible. This method calls
-// {@link //addDFAState} to ensure the {@code to} state is present in the
-// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
-// range of edges that can be represented in the DFA tables, this method
-// returns without adding the edge to the DFA.
-//
-// If {@code to} is {@code nil}, this method returns {@code nil}.
-// Otherwise, this method returns the {@link DFAState} returned by calling
-// {@link //addDFAState} for the {@code to} state.
-//
-// @param dfa The DFA
-// @param from The source state for the edge
-// @param t The input symbol
-// @param to The target state for the edge
-//
-// @return If {@code to} is {@code nil}, this method returns {@code nil};
-// otherwise this method returns the result of calling {@link //addDFAState}
-// on {@code to}
-func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
- if ParserATNSimulatorDebug {
- fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
- }
- if to == nil {
- return nil
- }
- p.atn.stateMu.Lock()
-	to = p.addDFAState(dfa, to) // use the existing state if possible, not the incoming one
- p.atn.stateMu.Unlock()
- if from == nil || t < -1 || t > p.atn.maxTokenType {
- return to
- }
- p.atn.edgeMu.Lock()
- if from.getEdges() == nil {
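-		// maxTokenType+1 token slots plus one extra so EOF (t == -1) fits at index 0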
- from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
- }
- from.setIthEdge(t+1, to) // connect
- p.atn.edgeMu.Unlock()
-
- if ParserATNSimulatorDebug {
- var names []string
- if p.parser != nil {
- names = p.parser.GetLiteralNames()
- }
-
- fmt.Println("DFA=\n" + dfa.String(names, nil))
- }
- return to
-}
-
-// Add state {@code D} to the DFA if it is not already present, and return
-// the actual instance stored in the DFA. If a state equivalent to {@code D}
-// is already in the DFA, the existing state is returned. Otherwise this
-// method returns {@code D} after adding it to the DFA.
-//
-// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
-// does not change the DFA.
-//
-// @param dfa The dfa
-// @param D The DFA state to add
-// @return The state stored in the DFA. This will be either the existing
-// state if {@code D} is already in the DFA, or {@code D} itself if the
-// state was not already present.
-func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
- if d == ATNSimulatorError {
- return d
- }
- existing, present := dfa.states.Get(d)
- if present {
- if ParserATNSimulatorTraceATNSim {
- fmt.Print("addDFAState " + d.String() + " exists")
- }
- return existing
- }
-
- // The state was not present, so update it with configs
- //
- d.stateNumber = dfa.states.Len()
- if !d.configs.ReadOnly() {
- d.configs.OptimizeConfigs(p.BaseATNSimulator)
- d.configs.SetReadOnly(true)
- }
- dfa.states.Put(d)
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("addDFAState new " + d.String())
- }
-
- return d
-}
-
-func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
- }
-}
-
-func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
- }
-}
-
-// If context sensitive parsing, we know it's ambiguity not conflict//
-func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
- exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
- if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
- interval := NewInterval(startIndex, stopIndex+1)
- fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
- ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
- }
- if p.parser != nil {
- p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
- }
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
deleted file mode 100644
index ba62af361..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go
+++ /dev/null
@@ -1,806 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "golang.org/x/exp/slices"
- "strconv"
-)
-
-// Represents {@code $} in local context prediction, which means wildcard.
-// {@code *+x = *}.
-// /
-const (
- BasePredictionContextEmptyReturnState = 0x7FFFFFFF
-)
-
-// Represents {@code $} in an array in full context mode, when {@code $}
-// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
-// {@code $} = {@link //EmptyReturnState}.
-// /
-
-var (
- BasePredictionContextglobalNodeCount = 1
- BasePredictionContextid = BasePredictionContextglobalNodeCount
-)
-
-type PredictionContext interface {
- Hash() int
- Equals(interface{}) bool
- GetParent(int) PredictionContext
- getReturnState(int) int
- length() int
- isEmpty() bool
- hasEmptyPath() bool
- String() string
-}
-
-type BasePredictionContext struct {
- cachedHash int
-}
-
-func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
- pc := new(BasePredictionContext)
- pc.cachedHash = cachedHash
-
- return pc
-}
-
-func (b *BasePredictionContext) isEmpty() bool {
- return false
-}
-
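-// calculateHash folds a parent context's hash and a return state into a
-// murmur-style hash; the final count of 2 is the number of values mixed in.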
-func calculateHash(parent PredictionContext, returnState int) int {
- h := murmurInit(1)
- h = murmurUpdate(h, parent.Hash())
- h = murmurUpdate(h, returnState)
- return murmurFinish(h, 2)
-}
-
-var _emptyPredictionContextHash int
-
-func init() {
- _emptyPredictionContextHash = murmurInit(1)
- _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
-}
-
-func calculateEmptyHash() int {
- return _emptyPredictionContextHash
-}
-
-// Used to cache {@link BasePredictionContext} objects. It's used for the shared
-// context cache associated with contexts in DFA states. This cache
-// can be used for both lexers and parsers.
-
-type PredictionContextCache struct {
- cache map[PredictionContext]PredictionContext
-}
-
-func NewPredictionContextCache() *PredictionContextCache {
- t := new(PredictionContextCache)
- t.cache = make(map[PredictionContext]PredictionContext)
- return t
-}
-
-// Add a context to the cache and return it. If the context already exists,
-// return that one instead and do not add a new context to the cache.
-// Protect shared cache from unsafe thread access.
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
- if ctx == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY
- }
- existing := p.cache[ctx]
- if existing != nil {
- return existing
- }
- p.cache[ctx] = ctx
- return ctx
-}
-
-func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
- return p.cache[ctx]
-}
-
-func (p *PredictionContextCache) length() int {
- return len(p.cache)
-}
-
-type SingletonPredictionContext interface {
- PredictionContext
-}
-
-type BaseSingletonPredictionContext struct {
- *BasePredictionContext
-
- parentCtx PredictionContext
- returnState int
-}
-
-func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
- var cachedHash int
- if parent != nil {
- cachedHash = calculateHash(parent, returnState)
- } else {
- cachedHash = calculateEmptyHash()
- }
-
- s := new(BaseSingletonPredictionContext)
- s.BasePredictionContext = NewBasePredictionContext(cachedHash)
-
- s.parentCtx = parent
- s.returnState = returnState
-
- return s
-}
-
-func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
- if returnState == BasePredictionContextEmptyReturnState && parent == nil {
- // someone can pass in the bits of an array ctx that mean $
- return BasePredictionContextEMPTY
- }
-
- return NewBaseSingletonPredictionContext(parent, returnState)
-}
-
-func (b *BaseSingletonPredictionContext) length() int {
- return 1
-}
-
-func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
- return b.parentCtx
-}
-
-func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
- return b.returnState
-}
-
-func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
- return b.returnState == BasePredictionContextEmptyReturnState
-}
-
-func (b *BaseSingletonPredictionContext) Hash() int {
- return b.cachedHash
-}
-
-func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool {
- if b == other {
- return true
- }
- if _, ok := other.(*BaseSingletonPredictionContext); !ok {
- return false
- }
-
- otherP := other.(*BaseSingletonPredictionContext)
-
- if b.returnState != otherP.getReturnState(0) {
- return false
- }
- if b.parentCtx == nil {
- return otherP.parentCtx == nil
- }
-
- return b.parentCtx.Equals(otherP.parentCtx)
-}
-
-func (b *BaseSingletonPredictionContext) String() string {
- var up string
-
- if b.parentCtx == nil {
- up = ""
- } else {
- up = b.parentCtx.String()
- }
-
- if len(up) == 0 {
- if b.returnState == BasePredictionContextEmptyReturnState {
- return "$"
- }
-
- return strconv.Itoa(b.returnState)
- }
-
- return strconv.Itoa(b.returnState) + " " + up
-}
-
-var BasePredictionContextEMPTY = NewEmptyPredictionContext()
-
-type EmptyPredictionContext struct {
- *BaseSingletonPredictionContext
-}
-
-func NewEmptyPredictionContext() *EmptyPredictionContext {
-
- p := new(EmptyPredictionContext)
-
- p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
- p.cachedHash = calculateEmptyHash()
- return p
-}
-
-func (e *EmptyPredictionContext) isEmpty() bool {
- return true
-}
-
-func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
- return nil
-}
-
-func (e *EmptyPredictionContext) getReturnState(index int) int {
- return e.returnState
-}
-
-func (e *EmptyPredictionContext) Hash() int {
- return e.cachedHash
-}
-
-func (e *EmptyPredictionContext) Equals(other interface{}) bool {
- return e == other
-}
-
-func (e *EmptyPredictionContext) String() string {
- return "$"
-}
-
-type ArrayPredictionContext struct {
- *BasePredictionContext
-
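-	// parents[i] pairs with returnStates[i]; a nil parent together with
-	// BasePredictionContextEmptyReturnState encodes $ merged into the array.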
- parents []PredictionContext
- returnStates []int
-}
-
-func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
- // Parent can be nil only if full ctx mode and we make an array
- // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
- // nil parent and
- // returnState == {@link //EmptyReturnState}.
- hash := murmurInit(1)
-
- for _, parent := range parents {
- hash = murmurUpdate(hash, parent.Hash())
- }
-
- for _, returnState := range returnStates {
- hash = murmurUpdate(hash, returnState)
- }
-
- hash = murmurFinish(hash, len(parents)<<1)
-
- c := new(ArrayPredictionContext)
- c.BasePredictionContext = NewBasePredictionContext(hash)
-
- c.parents = parents
- c.returnStates = returnStates
-
- return c
-}
-
-func (a *ArrayPredictionContext) GetReturnStates() []int {
- return a.returnStates
-}
-
-func (a *ArrayPredictionContext) hasEmptyPath() bool {
- return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
-}
-
-func (a *ArrayPredictionContext) isEmpty() bool {
- // since EmptyReturnState can only appear in the last position, we
- // don't need to verify that size==1
- return a.returnStates[0] == BasePredictionContextEmptyReturnState
-}
-
-func (a *ArrayPredictionContext) length() int {
- return len(a.returnStates)
-}
-
-func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
- return a.parents[index]
-}
-
-func (a *ArrayPredictionContext) getReturnState(index int) int {
- return a.returnStates[index]
-}
-
-// Equals is the default comparison function for ArrayPredictionContext when no specialized
-// implementation is needed for a collection
-func (a *ArrayPredictionContext) Equals(o interface{}) bool {
- if a == o {
- return true
- }
- other, ok := o.(*ArrayPredictionContext)
- if !ok {
- return false
- }
- if a.cachedHash != other.Hash() {
- return false // can't be same if hash is different
- }
-
-	// Must compare the actual array elements and not just the array address.
- return slices.Equal(a.returnStates, other.returnStates) &&
- slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool {
- return x.Equals(y)
- })
-}
-
-// Hash is the default hash function for ArrayPredictionContext when no specialized
-// implementation is needed for a collection
-func (a *ArrayPredictionContext) Hash() int {
- return a.BasePredictionContext.cachedHash
-}
-
-func (a *ArrayPredictionContext) String() string {
- if a.isEmpty() {
- return "[]"
- }
-
- s := "["
- for i := 0; i < len(a.returnStates); i++ {
- if i > 0 {
- s = s + ", "
- }
- if a.returnStates[i] == BasePredictionContextEmptyReturnState {
- s = s + "$"
- continue
- }
- s = s + strconv.Itoa(a.returnStates[i])
- if a.parents[i] != nil {
- s = s + " " + a.parents[i].String()
- } else {
-			s = s + " nil"
- }
- }
-
- return s + "]"
-}
-
-// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
-// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
-// /
-func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
- if outerContext == nil {
- outerContext = ParserRuleContextEmpty
- }
- // if we are in RuleContext of start rule, s, then BasePredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
- return BasePredictionContextEMPTY
- }
- // If we have a parent, convert it to a BasePredictionContext graph
- parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
- state := a.states[outerContext.GetInvokingState()]
- transition := state.GetTransitions()[0]
-
- return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
-}
-
-func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
-
- // Share same graph if both same
- //
- if a == b || a.Equals(b) {
- return a
- }
-
- // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test
- // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created
- // from it.
- // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion
- // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from
- // either of them.
-
- ac, ok1 := a.(*BaseSingletonPredictionContext)
- bc, ok2 := b.(*BaseSingletonPredictionContext)
-
- if ok1 && ok2 {
- return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
- }
-	// At least one of a or b is an array.
-	// If one is $ and rootIsWildcard, return $ as the wildcard.
- if rootIsWildcard {
- if _, ok := a.(*EmptyPredictionContext); ok {
- return a
- }
- if _, ok := b.(*EmptyPredictionContext); ok {
- return b
- }
- }
-
-	// Convert Singleton or Empty so that both are arrays, to normalize. We
-	// should not reuse the existing parameters here.
-	//
- // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here
-
- var arp, arb *ArrayPredictionContext
- var ok bool
- if arp, ok = a.(*ArrayPredictionContext); ok {
- } else if _, ok = a.(*BaseSingletonPredictionContext); ok {
- arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
- } else if _, ok = a.(*EmptyPredictionContext); ok {
- arp = NewArrayPredictionContext([]PredictionContext{}, []int{})
- }
-
- if arb, ok = b.(*ArrayPredictionContext); ok {
- } else if _, ok = b.(*BaseSingletonPredictionContext); ok {
- arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
- } else if _, ok = b.(*EmptyPredictionContext); ok {
- arb = NewArrayPredictionContext([]PredictionContext{}, []int{})
- }
-
-	// Both arp and arb are now arrays, so merge them as arrays.
- return mergeArrays(arp, arb, rootIsWildcard, mergeCache)
-}
-
-// Merge two {@link SingletonBasePredictionContext} instances.
-//
-// Stack tops equal and parents merge to the same graph: return the left
-// graph.
-//
-// Same stack top but parents differ: merge the parents, giving an array
-// node for the merged parents, then the remainders of those graphs. A new
-// root node is created to point to the merged parents.
-//
-// Different stack tops pointing to the same parent: make an array node for
-// the root where both elements in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents: make an array node
-// for the root where each element points to the corresponding original
-// parent.
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// @param mergeCache
-// /
-func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- if mergeCache != nil {
- previous := mergeCache.Get(a.Hash(), b.Hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- previous = mergeCache.Get(b.Hash(), a.Hash())
- if previous != nil {
- return previous.(PredictionContext)
- }
- }
-
- rootMerge := mergeRoot(a, b, rootIsWildcard)
- if rootMerge != nil {
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), rootMerge)
- }
- return rootMerge
- }
- if a.returnState == b.returnState {
- parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
- // if parent is same as existing a or b parent or reduced to a parent,
- // return it
- if parent == a.parentCtx {
- return a // ax + bx = ax, if a=b
- }
- if parent == b.parentCtx {
- return b // ax + bx = bx, if a=b
- }
- // else: ax + ay = a'[x,y]
-		// merge parents x and y, giving array node with x,y, then remainders
-		// of those graphs. dup a, a' points at merged array.
-		// New joined parent, so create a new singleton pointing to it, a'.
- spc := SingletonBasePredictionContextCreate(parent, a.returnState)
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), spc)
- }
- return spc
- }
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- var singleParent PredictionContext
-	if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + bx = [a,b]x
- singleParent = a.parentCtx
- }
- if singleParent != nil { // parents are same
- // sort payloads and use same parent
- payloads := []int{a.returnState, b.returnState}
- if a.returnState > b.returnState {
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- }
- parents := []PredictionContext{singleParent, singleParent}
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), apc)
- }
- return apc
- }
- // parents differ and can't merge them. Just pack together
- // into array can't merge.
- // ax + by = [ax,by]
- payloads := []int{a.returnState, b.returnState}
- parents := []PredictionContext{a.parentCtx, b.parentCtx}
- if a.returnState > b.returnState { // sort by payload
- payloads[0] = b.returnState
- payloads[1] = a.returnState
- parents = []PredictionContext{b.parentCtx, a.parentCtx}
- }
- apc := NewArrayPredictionContext(parents, payloads)
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), apc)
- }
- return apc
-}
-
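The three non-root outcomes of mergeSingletons above are easier to see with concrete values. Below is a minimal standalone sketch of the case analysis — simplified structs, not the runtime's PredictionContext types: equal return states merge parents, a shared parent collects both return states under it, and anything else is packed as a sorted array.

```go
package main

import "fmt"

// sctx is a simplified stand-in for a singleton prediction context.
type sctx struct {
	parent      *sctx
	returnState int
}

// describeMerge mirrors the case analysis in mergeSingletons, returning a
// label for the shape of the merged context.
func describeMerge(a, b *sctx) string {
	lo, hi := a.returnState, b.returnState
	if lo > hi {
		lo, hi = hi, lo // payloads are kept sorted in array nodes
	}
	switch {
	case a.returnState == b.returnState:
		// ax + ay -> a'[x,y]: same payload, the parents get merged
		return fmt.Sprintf("singleton(%d) over merged parents", a.returnState)
	case a.parent != nil && a.parent == b.parent:
		// ax + bx -> [a,b]x: both payloads under the one shared parent
		return fmt.Sprintf("array[%d,%d] under the shared parent", lo, hi)
	default:
		// ax + by -> [ax,by]: nothing to collapse, pack both pairs
		return fmt.Sprintf("array[%d,%d] with separate parents", lo, hi)
	}
}

func main() {
	x := &sctx{returnState: 1}
	y := &sctx{returnState: 2}
	fmt.Println(describeMerge(&sctx{x, 7}, &sctx{y, 7})) // singleton(7) over merged parents
	fmt.Println(describeMerge(&sctx{x, 7}, &sctx{x, 9})) // array[7,9] under the shared parent
	fmt.Println(describeMerge(&sctx{x, 9}, &sctx{y, 7})) // array[7,9] with separate parents
}
```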
-// Handle the case where at least one of {@code a} or {@code b} is
-// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
-// to represent {@link //EMPTY}.
-//
-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is a superset of any graph: return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so the merged parent is
-// {@code //EMPTY}: return the left graph.
-//
-// Special case of the last merge if local context.
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts: {@link //EMPTY} in an array is a special value
-// (with a nil parent).
-//
-// @param a the first {@link SingletonBasePredictionContext}
-// @param b the second {@link SingletonBasePredictionContext}
-// @param rootIsWildcard {@code true} if this is a local-context merge,
-// otherwise false to indicate a full-context merge
-// /
-func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
- if rootIsWildcard {
-		if a == BasePredictionContextEMPTY {
-			return BasePredictionContextEMPTY // * + b = *
-		}
-		if b == BasePredictionContextEMPTY {
-			return BasePredictionContextEMPTY // a + * = *
-		}
- } else {
- if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
- return BasePredictionContextEMPTY // $ + $ = $
- } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
- payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []PredictionContext{b.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
- payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
- parents := []PredictionContext{a.GetParent(-1), nil}
- return NewArrayPredictionContext(parents, payloads)
- }
- }
- return nil
-}
-
-// Merge two {@link ArrayBasePredictionContext} instances.
-//
-// Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-// /
-func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
- if mergeCache != nil {
- previous := mergeCache.Get(a.Hash(), b.Hash())
- if previous != nil {
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous.(PredictionContext)
- }
- previous = mergeCache.Get(b.Hash(), a.Hash())
- if previous != nil {
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
- }
- return previous.(PredictionContext)
- }
- }
- // merge sorted payloads a + b => M
- i := 0 // walks a
- j := 0 // walks b
- k := 0 // walks target M array
-
- mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
- mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
- // walk and merge to yield mergedParents, mergedReturnStates
- for i < len(a.returnStates) && j < len(b.returnStates) {
- aParent := a.parents[i]
- bParent := b.parents[j]
- if a.returnStates[i] == b.returnStates[j] {
- // same payload (stack tops are equal), must yield merged singleton
- payload := a.returnStates[i]
- // $+$ = $
- bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
-			axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax -> ax
- if bothDollars || axAX {
- mergedParents[k] = aParent // choose left
- mergedReturnStates[k] = payload
- } else { // ax+ay -> a'[x,y]
- mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
- mergedParents[k] = mergedParent
- mergedReturnStates[k] = payload
- }
- i++ // hop over left one as usual
- j++ // but also Skip one in right side since we merge
- } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
- mergedParents[k] = aParent
- mergedReturnStates[k] = a.returnStates[i]
- i++
- } else { // b > a, copy b[j] to M
- mergedParents[k] = bParent
- mergedReturnStates[k] = b.returnStates[j]
- j++
- }
- k++
- }
- // copy over any payloads remaining in either array
- if i < len(a.returnStates) {
- for p := i; p < len(a.returnStates); p++ {
- mergedParents[k] = a.parents[p]
- mergedReturnStates[k] = a.returnStates[p]
- k++
- }
- } else {
- for p := j; p < len(b.returnStates); p++ {
- mergedParents[k] = b.parents[p]
- mergedReturnStates[k] = b.returnStates[p]
- k++
- }
- }
- // trim merged if we combined a few that had same stack tops
- if k < len(mergedParents) { // write index < last position trim
- if k == 1 { // for just one merged element, return singleton top
- pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), pc)
- }
- return pc
- }
- mergedParents = mergedParents[0:k]
- mergedReturnStates = mergedReturnStates[0:k]
- }
-
- M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
-
- // if we created same array as a or b, return that instead
- // TODO: track whether this is possible above during merge sort for speed
- // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. This could be causing allocation problems
- if M == a {
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), a)
- }
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
- }
- return a
- }
- if M == b {
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), b)
- }
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
- }
- return b
- }
- combineCommonParents(mergedParents)
-
- if mergeCache != nil {
- mergeCache.set(a.Hash(), b.Hash(), M)
- }
- if ParserATNSimulatorTraceATNSim {
- fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
- }
- return M
-}
-
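The loop above is a classic merge of two sorted lists, with the twist that equal stack tops collapse into one slot whose parent is itself a recursive merge. A standalone sketch of just that shape, where string labels stand in for parent graphs (illustrative names, not runtime types):

```go
package main

import "fmt"

// entry pairs a return state with a label standing in for its parent graph.
type entry struct {
	returnState int
	parent      string
}

// mergeSorted walks both sorted slices with two cursors; equal return
// states collapse into a single slot whose parent records both inputs.
func mergeSorted(a, b []entry) []entry {
	out := make([]entry, 0, len(a)+len(b))
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].returnState == b[j].returnState:
			// equal stack tops: emit one entry with "merged" parents
			out = append(out, entry{a[i].returnState, a[i].parent + "+" + b[j].parent})
			i++
			j++
		case a[i].returnState < b[j].returnState:
			out = append(out, a[i])
			i++
		default:
			out = append(out, b[j])
			j++
		}
	}
	// copy over whatever remains in either input
	out = append(out, a[i:]...)
	out = append(out, b[j:]...)
	return out
}

func main() {
	a := []entry{{3, "x"}, {7, "y"}}
	b := []entry{{3, "z"}, {9, "w"}}
	fmt.Println(mergeSorted(a, b)) // [{3 x+z} {7 y} {9 w}]
}
```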
-// Make a pass over all M {@code parents} and merge any {@code Equals()}
-// ones.
-// /
-func combineCommonParents(parents []PredictionContext) {
- uniqueParents := make(map[PredictionContext]PredictionContext)
-
- for p := 0; p < len(parents); p++ {
- parent := parents[p]
- if uniqueParents[parent] == nil {
- uniqueParents[parent] = parent
- }
- }
- for q := 0; q < len(parents); q++ {
- parents[q] = uniqueParents[parents[q]]
- }
-}
-
-func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
-
- if context.isEmpty() {
- return context
- }
- existing := visited[context]
- if existing != nil {
- return existing
- }
- existing = contextCache.Get(context)
- if existing != nil {
- visited[context] = existing
- return existing
- }
- changed := false
- parents := make([]PredictionContext, context.length())
- for i := 0; i < len(parents); i++ {
- parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
- if changed || parent != context.GetParent(i) {
- if !changed {
- parents = make([]PredictionContext, context.length())
- for j := 0; j < context.length(); j++ {
- parents[j] = context.GetParent(j)
- }
- changed = true
- }
- parents[i] = parent
- }
- }
- if !changed {
- contextCache.add(context)
- visited[context] = context
- return context
- }
- var updated PredictionContext
- if len(parents) == 0 {
- updated = BasePredictionContextEMPTY
- } else if len(parents) == 1 {
- updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
- } else {
- updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
- }
- contextCache.add(updated)
- visited[updated] = updated
- visited[context] = updated
-
- return updated
-}
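getCachedBasePredictionContext is essentially hash-consing: rebuild a graph bottom-up, replacing each node with the canonical instance from the cache so structurally equal graphs share storage. A minimal standalone sketch of that idea — a string key stands in for the runtime's hashing, and these types are illustrative, not the runtime's own:

```go
package main

import "fmt"

type node struct {
	label    string
	children []*node
}

type interner struct {
	cache map[string]*node
}

// key builds a structural identity for a node; real code hashes instead.
func key(n *node) string {
	s := n.label
	for _, c := range n.children {
		s += "(" + key(c) + ")"
	}
	return s
}

// canonical interns children first, then the node itself, so equal
// subgraphs collapse onto one shared allocation.
func (in *interner) canonical(n *node) *node {
	for i, c := range n.children {
		n.children[i] = in.canonical(c)
	}
	k := key(n)
	if hit, ok := in.cache[k]; ok {
		return hit
	}
	in.cache[k] = n
	return n
}

func main() {
	in := &interner{cache: map[string]*node{}}
	a := in.canonical(&node{label: "x", children: []*node{{label: "y"}}})
	b := in.canonical(&node{label: "x", children: []*node{{label: "y"}}})
	fmt.Println(a == b) // true: both resolve to the one cached instance
}
```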
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
deleted file mode 100644
index 7b9b72fab..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go
+++ /dev/null
@@ -1,529 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// This enumeration defines the prediction modes available in ANTLR 4 along with
-// utility methods for analyzing configuration sets for conflicts and/or
-// ambiguities.
-
-const (
- //
- // The SLL(*) prediction mode. This prediction mode ignores the current
- // parser context when making predictions. This is the fastest prediction
- // mode, and provides correct results for many grammars. This prediction
- // mode is more powerful than the prediction mode provided by ANTLR 3, but
- // may result in syntax errors for grammar and input combinations which are
- // not SLL.
- //
- //
- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeSLL = 0
- //
- // The LL(*) prediction mode. This prediction mode allows the current parser
- // context to be used for resolving SLL conflicts that occur during
- // prediction. This is the fastest prediction mode that guarantees correct
- // parse results for all combinations of grammars with syntactically correct
- // inputs.
- //
- //
- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLL = 1
- //
- // The LL(*) prediction mode with exact ambiguity detection. In addition to
- // the correctness guarantees provided by the {@link //LL} prediction mode,
- // this prediction mode instructs the prediction algorithm to determine the
- // complete and exact set of ambiguous alternatives for every ambiguous
- // decision encountered while parsing.
- //
- //
- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- //
- PredictionModeLLExactAmbigDetection = 2
-)
-
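These modes are commonly combined in the well-known two-stage strategy: try SLL with a bail-out error strategy, and only rerun the more expensive full-LL pass if SLL fails. A sketch against this runtime's API, where `NewMyLexer`, `NewMyParser`, and the `Document` rule are assumed generated names rather than part of the runtime:

```go
package main

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// parseTwoStage tries the fast SLL mode first and falls back to full LL
// only when SLL bails out. NewMyLexer, NewMyParser, and Document() are
// hypothetical generated names.
func parseTwoStage(src string) (tree antlr.ParseTree) {
	tokens := antlr.NewCommonTokenStream(
		NewMyLexer(antlr.NewInputStream(src)), antlr.TokenDefaultChannel)

	defer func() {
		if r := recover(); r != nil {
			// Stage 2: SLL bailed out; rewind and re-parse with full LL,
			// letting the default strategy report any real syntax errors.
			tokens.Seek(0)
			p := NewMyParser(tokens)
			p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
			tree = p.Document()
		}
	}()

	// Stage 1: SLL with a bail-out strategy, which panics on the first
	// error instead of attempting recovery.
	p := NewMyParser(tokens)
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	return p.Document()
}
```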
-// Computes the SLL prediction termination condition.
-//
-//
-// This method computes the SLL prediction termination condition for both of
-// the following cases.
-//
-//
-// - The usual SLL+LL fallback upon SLL conflict
-// - Pure SLL without LL fallback
-//
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
-//
-// When the ATN simulation reaches the state before {@code ';'}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative two has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also lets us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts so we might see two configurations like the
-// following.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x''} when looking for conflicts in
-// the following configurations.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
- // Configs in rule stop states indicate reaching the end of the decision
- // rule (local context) or end of start rule (full context). If all
- // configs meet this condition, then none of the configurations is able
- // to Match additional input so we terminate prediction.
- //
- if PredictionModeallConfigsInRuleStopStates(configs) {
- return true
- }
- // pure SLL mode parsing
- if mode == PredictionModeSLL {
-		// Don't bother with combining configs from different semantic
-		// contexts if we can fail over to full LL; it costs more time
-		// since we'll often fail over anyway.
- if configs.HasSemanticContext() {
- // dup configs, tossing out semantic predicates
- dup := NewBaseATNConfigSet(false)
- for _, c := range configs.GetItems() {
-
- // NewBaseATNConfig({semanticContext:}, c)
- c = NewBaseATNConfig2(c, SemanticContextNone)
- dup.Add(c, nil)
- }
- configs = dup
- }
- // now we have combined contexts for configs with dissimilar preds
- }
- // pure SLL or combined SLL+LL mode parsing
- altsets := PredictionModegetConflictingAltSubsets(configs)
- return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
-}
-
-// Checks if any configuration in {@code configs} is in a
-// {@link RuleStopState}. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// @param configs the configuration set to test
-// @return {@code true} if any configuration in {@code configs} is in a
-// {@link RuleStopState}, otherwise {@code false}
-func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
- for _, c := range configs.GetItems() {
- if _, ok := c.GetState().(*RuleStopState); ok {
- return true
- }
- }
- return false
-}
-
-// Checks if all configurations in {@code configs} are in a
-// {@link RuleStopState}. Configurations meeting this condition have reached
-// the end of the decision rule (local context) or end of start rule (full
-// context).
-//
-// @param configs the configuration set to test
-// @return {@code true} if all configurations in {@code configs} are in a
-// {@link RuleStopState}, otherwise {@code false}
-func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
-
- for _, c := range configs.GetItems() {
- if _, ok := c.GetState().(*RuleStopState); !ok {
- return false
- }
- }
- return true
-}
-
-// Full LL prediction termination.
-//
-// Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C}, into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal; you will still have the conflict. It's just inefficient. It
-// might even look all the way to the end of the file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing an equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-//
-//
-// - no conflicts and more than 1 alternative in set => continue
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
-// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
-// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1,3}} => continue
-//
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
-// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-// {@code {1}} => stop and predict 1
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {1}} = {@code {1}} => stop and predict 1, can announce
-// ambiguity {@code {1,2}}
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
-// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {2}} = {@code {1,2}} => continue
-//
-// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
-// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
-// {@code {3}} = {@code {1,3}} => continue
-//
-//
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set.
-//
-// |A_i|>1 and
-// A_i = A_j for all i, j.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
- return PredictionModegetSingleViableAlt(altsets)
-}
-
-// Determines if every alternative subset in {@code altsets} contains more
-// than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every {@link BitSet} in {@code altsets} has
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
- return !PredictionModehasNonConflictingAltSet(altsets)
-}
-
-// Determines if any single alternative subset in {@code altsets} contains
-// exactly one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
-func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() == 1 {
- return true
- }
- }
- return false
-}
-
-// Determines if any single alternative subset in {@code altsets} contains
-// more than one alternative.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if {@code altsets} contains a {@link BitSet} with
-// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
-func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if alts.length() > 1 {
- return true
- }
- }
- return false
-}
-
-// Determines if every alternative subset in {@code altsets} is equivalent.
-//
-// @param altsets a collection of alternative subsets
-// @return {@code true} if every member of {@code altsets} is equal to the
-// others, otherwise {@code false}
-func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
- var first *BitSet
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- if first == nil {
- first = alts
- } else if alts != first {
- return false
- }
- }
-
- return true
-}
-
-// Returns the unique alternative predicted by all alternative subsets in
-// {@code altsets}. If no such alternative exists, this method returns
-// {@link ATN//INVALID_ALT_NUMBER}.
-//
-// @param altsets a collection of alternative subsets
-func PredictionModegetUniqueAlt(altsets []*BitSet) int {
- all := PredictionModeGetAlts(altsets)
- if all.length() == 1 {
- return all.minValue()
- }
-
- return ATNInvalidAltNumber
-}
-
-// Gets the complete set of represented alternatives for a collection of
-// alternative subsets. This method returns the union of each {@link BitSet}
-// in {@code altsets}.
-//
-// @param altsets a collection of alternative subsets
-// @return the set of represented alternatives in {@code altsets}
-func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
- all := NewBitSet()
- for _, alts := range altsets {
- all.or(alts)
- }
- return all
-}
-
-// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
-// For each configuration {@code c} in {@code configs}:
-//
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
- configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst)
-
- for _, c := range configs.GetItems() {
-
- alts, ok := configToAlts.Get(c)
- if !ok {
- alts = NewBitSet()
- configToAlts.Put(c, alts)
- }
- alts.add(c.GetAlt())
- }
-
- return configToAlts.Values()
-}
-
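The grouping above keys each config on (state, context) while deliberately ignoring the alternative, so every resulting set is one A_{s,ctx} from the termination discussion. The same shape with plain maps, as a standalone sketch (not the runtime's JMap or BitSet):

```go
package main

import "fmt"

type config struct {
	state int
	alt   int
	ctx   string // stack context, reduced to an opaque key
}

// conflictingAltSubsets groups configs by (state, ctx) — alt is excluded
// from the key — and collects each group's alternatives.
func conflictingAltSubsets(configs []config) []map[int]bool {
	type key struct {
		state int
		ctx   string
	}
	groups := map[key]map[int]bool{}
	for _, c := range configs {
		k := key{c.state, c.ctx}
		if groups[k] == nil {
			groups[k] = map[int]bool{}
		}
		groups[k][c.alt] = true
	}
	out := make([]map[int]bool, 0, len(groups))
	for _, alts := range groups {
		out = append(out, alts)
	}
	return out
}

func main() {
	// (s,1,x) and (s,2,x) conflict; (s',1,y) does not.
	subsets := conflictingAltSubsets([]config{
		{1, 1, "x"}, {1, 2, "x"}, {2, 1, "y"},
	})
	fmt.Println(subsets) // two subsets: {1,2} and {1} (order varies)
}
```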
-// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each
-// configuration {@code c} in {@code configs}:
-//
-//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-//
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
- m := NewAltDict()
-
- for _, c := range configs.GetItems() {
- alts := m.Get(c.GetState().String())
- if alts == nil {
- alts = NewBitSet()
- m.put(c.GetState().String(), alts)
- }
- alts.(*BitSet).add(c.GetAlt())
- }
- return m
-}
-
-func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
- values := PredictionModeGetStateToAltMap(configs).values()
- for i := 0; i < len(values); i++ {
- if values[i].(*BitSet).length() == 1 {
- return true
- }
- }
- return false
-}
-
-func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
- result := ATNInvalidAltNumber
-
- for i := 0; i < len(altsets); i++ {
- alts := altsets[i]
- minAlt := alts.minValue()
- if result == ATNInvalidAltNumber {
- result = minAlt
- } else if result != minAlt { // more than 1 viable alt
- return ATNInvalidAltNumber
- }
- }
- return result
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
deleted file mode 100644
index bfe542d09..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strings"
-
- "strconv"
-)
-
-type Recognizer interface {
- GetLiteralNames() []string
- GetSymbolicNames() []string
- GetRuleNames() []string
-
- Sempred(RuleContext, int, int) bool
- Precpred(RuleContext, int) bool
-
- GetState() int
- SetState(int)
- Action(RuleContext, int, int)
- AddErrorListener(ErrorListener)
- RemoveErrorListeners()
- GetATN() *ATN
- GetErrorListenerDispatch() ErrorListener
-}
-
-type BaseRecognizer struct {
- listeners []ErrorListener
- state int
-
- RuleNames []string
- LiteralNames []string
- SymbolicNames []string
- GrammarFileName string
-}
-
-func NewBaseRecognizer() *BaseRecognizer {
- rec := new(BaseRecognizer)
- rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
- rec.state = -1
- return rec
-}
-
-var tokenTypeMapCache = make(map[string]int)
-var ruleIndexMapCache = make(map[string]int)
-
-func (b *BaseRecognizer) checkVersion(toolVersion string) {
- runtimeVersion := "4.12.0"
- if runtimeVersion != toolVersion {
- fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
- }
-}
-
-func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
- panic("action not implemented on Recognizer!")
-}
-
-func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
- b.listeners = append(b.listeners, listener)
-}
-
-func (b *BaseRecognizer) RemoveErrorListeners() {
- b.listeners = make([]ErrorListener, 0)
-}
-
-func (b *BaseRecognizer) GetRuleNames() []string {
- return b.RuleNames
-}
-
-func (b *BaseRecognizer) GetTokenNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetSymbolicNames() []string {
- return b.SymbolicNames
-}
-
-func (b *BaseRecognizer) GetLiteralNames() []string {
- return b.LiteralNames
-}
-
-func (b *BaseRecognizer) GetState() int {
- return b.state
-}
-
-func (b *BaseRecognizer) SetState(v int) {
- b.state = v
-}
-
-//func (b *Recognizer) GetTokenTypeMap() {
-// var tokenNames = b.GetTokenNames()
-// if (tokenNames==nil) {
-// panic("The current recognizer does not provide a list of token names.")
-// }
-// var result = tokenTypeMapCache[tokenNames]
-// if(result==nil) {
-// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
-// result.EOF = TokenEOF
-// tokenTypeMapCache[tokenNames] = result
-// }
-// return result
-//}
-
-// Get a map from rule names to rule indexes.
-//
-// Used for XPath and tree pattern compilation.
-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
-
- panic("Method not defined!")
- // var ruleNames = b.GetRuleNames()
- // if (ruleNames==nil) {
- // panic("The current recognizer does not provide a list of rule names.")
- // }
- //
- // var result = ruleIndexMapCache[ruleNames]
- // if(result==nil) {
- // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
- // ruleIndexMapCache[ruleNames] = result
- // }
- // return result
-}
-
-func (b *BaseRecognizer) GetTokenType(tokenName string) int {
- panic("Method not defined!")
- // var ttype = b.GetTokenTypeMap()[tokenName]
- // if (ttype !=nil) {
- // return ttype
- // } else {
- // return TokenInvalidType
- // }
-}
-
-//func (b *Recognizer) GetTokenTypeMap() map[string]int {
-// Vocabulary vocabulary = getVocabulary()
-//
-// Synchronized (tokenTypeMapCache) {
-// Map result = tokenTypeMapCache.Get(vocabulary)
-// if (result == null) {
-// result = new HashMap()
-// for (int i = 0; i < GetATN().maxTokenType; i++) {
-// String literalName = vocabulary.getLiteralName(i)
-// if (literalName != null) {
-// result.put(literalName, i)
-// }
-//
-// String symbolicName = vocabulary.GetSymbolicName(i)
-// if (symbolicName != null) {
-// result.put(symbolicName, i)
-// }
-// }
-//
-// result.put("EOF", Token.EOF)
-// result = Collections.unmodifiableMap(result)
-// tokenTypeMapCache.put(vocabulary, result)
-// }
-//
-// return result
-// }
-//}
-
-// What is the error header? Normally line/character position information.
-func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
- line := e.GetOffendingToken().GetLine()
- column := e.GetOffendingToken().GetColumn()
- return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
-}
-
-// How should a token be displayed in an error message? The default
-// is to display just the text, but during development you might
-// want to have a lot of information spit out. Override in that case
-// to use t.String() (which, for CommonToken, dumps everything about
-// the token). This is better than forcing you to override a method in
-// your token objects, because you don't have to go modify your lexer
-// so that it creates a new Java type.
-//
-// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
-// implementations of {@link ANTLRErrorStrategy} may provide a similar
-// feature when necessary. For example, see
-// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
-func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
- if t == nil {
- return ""
- }
- s := t.GetText()
- if s == "" {
- if t.GetTokenType() == TokenEOF {
- s = ""
- } else {
- s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
- }
- }
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
-
- return "'" + s + "'"
-}
-
-func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
- return NewProxyErrorListener(b.listeners)
-}
-
-// subclass needs to override these if there are sempreds or actions
-// that the ATN interp needs to execute
-func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
- return true
-}
-
-func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
- return true
-}
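The listener list seeded with ConsoleErrorListenerINSTANCE above is what GetErrorListenerDispatch fans out to, and swapping in a custom listener is the usual way to capture errors instead of printing them. A sketch using this runtime's ErrorListener surface; the collectingListener type and attach helper are illustrative names:

```go
package listeners

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// collectingListener gathers syntax errors instead of writing them to the
// console. Embedding DefaultErrorListener supplies no-op versions of the
// remaining ErrorListener methods.
type collectingListener struct {
	*antlr.DefaultErrorListener
	errors []string
}

func (l *collectingListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
	line, column int, msg string, e antlr.RecognitionException) {
	l.errors = append(l.errors, fmt.Sprintf("line %d:%d %s", line, column, msg))
}

// attach replaces the default console listener on any Recognizer.
func attach(r antlr.Recognizer) *collectingListener {
	l := &collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
	r.RemoveErrorListeners() // drop ConsoleErrorListenerINSTANCE
	r.AddErrorListener(l)
	return l
}
```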
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
deleted file mode 100644
index 210699ba2..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// A rule context is a record of a single rule invocation. It knows
-// which context invoked it, if any. If there is no parent context, then
-// naturally the invoking state is not valid. The parent link
-// provides a chain upwards from the current rule invocation to the root
-// of the invocation tree, forming a stack. We actually carry no
-// information about the rule associated with this context (except
-// when parsing). We keep only the state number of the invoking state from
-// the ATN submachine that invoked this context. Contrast this with the s
-// pointer inside ParserRuleContext that tracks the current state
-// being "executed" for the current rule.
-//
-// The parent contexts are useful for computing lookahead sets and
-// getting error information.
-//
-// These objects are used during parsing and prediction.
-// For the special case of parsers, we use the subclass
-// ParserRuleContext.
-//
-// @see ParserRuleContext
-//
-
-type RuleContext interface {
- RuleNode
-
- GetInvokingState() int
- SetInvokingState(int)
-
- GetRuleIndex() int
- IsEmpty() bool
-
- GetAltNumber() int
- SetAltNumber(altNumber int)
-
- String([]string, RuleContext) string
-}
-
-type BaseRuleContext struct {
- parentCtx RuleContext
- invokingState int
- RuleIndex int
-}
-
-func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
-
- rn := new(BaseRuleContext)
-
-	// What context invoked this rule?
-	rn.parentCtx = parent
-
-	// What state invoked the rule associated with this context?
-	// The "return address" is the followState of invokingState.
-	// If parent is nil, this should be -1.
- if parent == nil {
- rn.invokingState = -1
- } else {
- rn.invokingState = invokingState
- }
-
- return rn
-}
-
-func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
- return b
-}
-
-func (b *BaseRuleContext) SetParent(v Tree) {
- if v == nil {
- b.parentCtx = nil
- } else {
- b.parentCtx = v.(RuleContext)
- }
-}
-
-func (b *BaseRuleContext) GetInvokingState() int {
- return b.invokingState
-}
-
-func (b *BaseRuleContext) SetInvokingState(t int) {
- b.invokingState = t
-}
-
-func (b *BaseRuleContext) GetRuleIndex() int {
- return b.RuleIndex
-}
-
-func (b *BaseRuleContext) GetAltNumber() int {
- return ATNInvalidAltNumber
-}
-
-func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
-
-// A context is empty if there is no invoking state, meaning nobody called
-// the current context.
-func (b *BaseRuleContext) IsEmpty() bool {
- return b.invokingState == -1
-}
-
-// Return the combined text of all child nodes. This method only considers
-// tokens which have been added to the parse tree.
-//
-// Since tokens on hidden channels (e.g. whitespace or comments) are not
-// added to the parse trees, they will not appear in the output of this
-// method.
-//
-
-func (b *BaseRuleContext) GetParent() Tree {
- return b.parentCtx
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
deleted file mode 100644
index f73b06bc6..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "strconv"
- "strings"
-)
-
-type TokenSourceCharStreamPair struct {
- tokenSource TokenSource
- charStream CharStream
-}
-
-// A token has properties: text, type, line, character position in the line
-// (so we can ignore tabs), token channel, index, and source from which
-// we obtained this token.
-
-type Token interface {
- GetSource() *TokenSourceCharStreamPair
- GetTokenType() int
- GetChannel() int
- GetStart() int
- GetStop() int
- GetLine() int
- GetColumn() int
-
- GetText() string
- SetText(s string)
-
- GetTokenIndex() int
- SetTokenIndex(v int)
-
- GetTokenSource() TokenSource
- GetInputStream() CharStream
-}
-
-type BaseToken struct {
- source *TokenSourceCharStreamPair
- tokenType int // token type of the token
- channel int // The parser ignores everything not on DEFAULT_CHANNEL
- start int // optional return -1 if not implemented.
- stop int // optional return -1 if not implemented.
- tokenIndex int // from 0..n-1 of the token object in the input stream
- line int // line=1..n of the 1st character
- column int // beginning of the line at which it occurs, 0..n-1
- text string // text of the token.
- readOnly bool
-}
-
-const (
- TokenInvalidType = 0
-
- // During lookahead operations, this "token" signifies we hit rule end ATN state
- // and did not follow it despite needing to.
- TokenEpsilon = -2
-
- TokenMinUserTokenType = 1
-
- TokenEOF = -1
-
- // All tokens go to the parser (unless Skip() is called in that rule)
- // on a particular "channel". The parser tunes to a particular channel
- // so that whitespace etc... can go to the parser on a "hidden" channel.
-
- TokenDefaultChannel = 0
-
- // Anything on different channel than DEFAULT_CHANNEL is not parsed
- // by parser.
-
- TokenHiddenChannel = 1
-)
-
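Channels are how a grammar's `-> channel(HIDDEN)` lexer actions keep whitespace and comments in the buffer without the parser ever seeing them. A sketch that inspects both channels after lexing; `NewMyLexer` is an assumed generated lexer, not part of the runtime:

```go
package lexdemo

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// splitByChannel lexes src and partitions the tokens by channel; hidden
// tokens stay in the buffer even though a parser would skip over them.
func splitByChannel(src string) (visible, hidden []antlr.Token) {
	stream := antlr.NewCommonTokenStream(
		NewMyLexer(antlr.NewInputStream(src)), antlr.TokenDefaultChannel)
	stream.Fill() // run the lexer to EOF so GetAllTokens sees everything
	for _, t := range stream.GetAllTokens() {
		if t.GetChannel() == antlr.TokenDefaultChannel {
			visible = append(visible, t)
		} else {
			hidden = append(hidden, t)
		}
	}
	return visible, hidden
}
```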
-func (b *BaseToken) GetChannel() int {
- return b.channel
-}
-
-func (b *BaseToken) GetStart() int {
- return b.start
-}
-
-func (b *BaseToken) GetStop() int {
- return b.stop
-}
-
-func (b *BaseToken) GetLine() int {
- return b.line
-}
-
-func (b *BaseToken) GetColumn() int {
- return b.column
-}
-
-func (b *BaseToken) GetTokenType() int {
- return b.tokenType
-}
-
-func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
- return b.source
-}
-
-func (b *BaseToken) GetTokenIndex() int {
- return b.tokenIndex
-}
-
-func (b *BaseToken) SetTokenIndex(v int) {
- b.tokenIndex = v
-}
-
-func (b *BaseToken) GetTokenSource() TokenSource {
- return b.source.tokenSource
-}
-
-func (b *BaseToken) GetInputStream() CharStream {
- return b.source.charStream
-}
-
-type CommonToken struct {
- *BaseToken
-}
-
-func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
-
- t := new(CommonToken)
-
- t.BaseToken = new(BaseToken)
-
- t.source = source
- t.tokenType = tokenType
- t.channel = channel
- t.start = start
- t.stop = stop
- t.tokenIndex = -1
- if t.source.tokenSource != nil {
- t.line = source.tokenSource.GetLine()
- t.column = source.tokenSource.GetCharPositionInLine()
- } else {
- t.column = -1
- }
- return t
-}
-
-// An empty {@link Pair} which is used as the default value of
-// {@link //source} for tokens that do not have a source.
-
-//CommonToken.EMPTY_SOURCE = [ nil, nil ]
-
-// Constructs a new {@link CommonToken} as a copy of another {@link Token}.
-//
-//
-// If {@code oldToken} is also a {@link CommonToken} instance, the newly
-// constructed token will share a reference to the {@link //text} field and
-// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
-// be assigned the result of calling {@link //GetText}, and {@link //source}
-// will be constructed from the result of {@link Token//GetTokenSource} and
-// {@link Token//GetInputStream}.
-//
-// @param oldToken The token to copy.
-func (c *CommonToken) clone() *CommonToken {
- t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
- t.tokenIndex = c.GetTokenIndex()
- t.line = c.GetLine()
- t.column = c.GetColumn()
- t.text = c.GetText()
- return t
-}
-
-func (c *CommonToken) GetText() string {
- if c.text != "" {
- return c.text
- }
- input := c.GetInputStream()
- if input == nil {
- return ""
- }
- n := input.Size()
- if c.start < n && c.stop < n {
- return input.GetTextFromInterval(NewInterval(c.start, c.stop))
- }
- return ""
-}
-
-func (c *CommonToken) SetText(text string) {
- c.text = text
-}
-
-func (c *CommonToken) String() string {
- txt := c.GetText()
- if txt != "" {
- txt = strings.Replace(txt, "\n", "\\n", -1)
- txt = strings.Replace(txt, "\r", "\\r", -1)
- txt = strings.Replace(txt, "\t", "\\t", -1)
- } else {
- txt = ""
- }
-
- var ch string
- if c.channel > 0 {
- ch = ",channel=" + strconv.Itoa(c.channel)
- } else {
- ch = ""
- }
-
- return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
- txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
- ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
-}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
deleted file mode 100644
index b3e38af34..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "fmt"
-)
-
-//
-// Useful for rewriting out a buffered input token stream after doing some
-// augmentation or other manipulations on it.
-
-//
-// You can insert stuff, replace, and delete chunks. Note that the operations
-// are done lazily--only if you convert the buffer to a {@link String} with
-// {@link TokenStream#getText()}. This is very efficient because you are not
-// moving data around all the time. As the buffer of tokens is converted to
-// strings, the {@link #getText()} method(s) scan the input token stream and
-// check to see if there is an operation at the current index. If so, the
-// operation is done and then normal {@link String} rendering continues on the
-// buffer. This is like having multiple Turing machine instruction streams
-// (programs) operating on a single input tape. :)
-//
-
-// This rewriter makes no modifications to the token stream. It does not ask the
-// stream to fill itself up nor does it advance the input cursor. The token
-// stream {@link TokenStream#index()} will return the same value before and
-// after any {@link #getText()} call.
-
-//
-// The rewriter only works on tokens that you have in the buffer and ignores the
-// current input cursor. If you are buffering tokens on-demand, calling
-// {@link #getText()} halfway through the input will only do rewrites for those
-// tokens in the first half of the file.
-
-//
-// Since the operations are done lazily at {@link #getText}-time, operations do
-// not screw up the token index values. That is, an insert operation at token
-// index {@code i} does not change the index values for tokens
-// {@code i}+1..n-1.
-
-//
-// Because operations never actually alter the buffer, you may always get the
-// original token stream back without undoing anything. Since the instructions
-// are queued up, you can easily simulate transactions and roll back any changes
-// if there is an error just by removing instructions. For example,
-
-//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-//
-
-//
-// Then in the rules, you can execute (assuming rewriter is visible):
-
-//
-// Token t,u;
-// ...
-//	rewriter.insertAfter(t, "text to put after t");
-//	rewriter.insertAfter(u, "text after u");
-// System.out.println(rewriter.getText());
-//
-
-//
-// You can also have multiple "instruction streams" and get multiple rewrites
-// from a single pass over the input. Just name the instruction streams and use
-// that name again when printing the buffer. This could be useful for generating
-// a C file and also its header file--all from the same buffer:
-
-//
-//	rewriter.insertAfter("pass1", t, "text to put after t");
-//	rewriter.insertAfter("pass2", u, "text after u");
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-//
-
-//
-// If you don't use named rewrite streams, a "default" stream is used as the
-// first example shows.
-
-const (
- Default_Program_Name = "default"
- Program_Init_Size = 100
- Min_Token_Index = 0
-)
-
-// Define the rewrite operation hierarchy
-
-type RewriteOperation interface {
- // Execute the rewrite operation by possibly adding to the buffer.
- // Return the index of the next token to operate on.
- Execute(buffer *bytes.Buffer) int
- String() string
- GetInstructionIndex() int
- GetIndex() int
- GetText() string
- GetOpName() string
- GetTokens() TokenStream
- SetInstructionIndex(val int)
- SetIndex(int)
- SetText(string)
- SetOpName(string)
- SetTokens(TokenStream)
-}
-
-type BaseRewriteOperation struct {
- //Current index of rewrites list
- instruction_index int
- //Token buffer index
- index int
- //Substitution text
- text string
- //Actual operation name
- op_name string
- //Pointer to token steam
- tokens TokenStream
-}
-
-func (op *BaseRewriteOperation) GetInstructionIndex() int {
- return op.instruction_index
-}
-
-func (op *BaseRewriteOperation) GetIndex() int {
- return op.index
-}
-
-func (op *BaseRewriteOperation) GetText() string {
- return op.text
-}
-
-func (op *BaseRewriteOperation) GetOpName() string {
- return op.op_name
-}
-
-func (op *BaseRewriteOperation) GetTokens() TokenStream {
- return op.tokens
-}
-
-func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
- op.instruction_index = val
-}
-
-func (op *BaseRewriteOperation) SetIndex(val int) {
- op.index = val
-}
-
-func (op *BaseRewriteOperation) SetText(val string) {
- op.text = val
-}
-
-func (op *BaseRewriteOperation) SetOpName(val string) {
- op.op_name = val
-}
-
-func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
- op.tokens = val
-}
-
-func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int {
- return op.index
-}
-
-func (op *BaseRewriteOperation) String() string {
- return fmt.Sprintf("<%s@%d:\"%s\">",
- op.op_name,
- op.tokens.Get(op.GetIndex()),
- op.text,
- )
-
-}
-
-type InsertBeforeOp struct {
- BaseRewriteOperation
-}
-
-func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
- return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
- index: index,
- text: text,
- op_name: "InsertBeforeOp",
- tokens: stream,
- }}
-}
-
-func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
- buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
- buffer.WriteString(op.tokens.Get(op.index).GetText())
- }
- return op.index + 1
-}
-
-func (op *InsertBeforeOp) String() string {
- return op.BaseRewriteOperation.String()
-}
-
-// Distinguish between insert after/before to do the "insert afters"
-// first and then the "insert befores" at same index. Implementation
-// of "insert after" is "insert before index+1".
-
-type InsertAfterOp struct {
- BaseRewriteOperation
-}
-
-func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
- return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{
- index: index + 1,
- text: text,
- tokens: stream,
- }}
-}
-
-func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
- buffer.WriteString(op.text)
- if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
- buffer.WriteString(op.tokens.Get(op.index).GetText())
- }
- return op.index + 1
-}
-
-func (op *InsertAfterOp) String() string {
- return op.BaseRewriteOperation.String()
-}
-
-// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
-// instructions.
-type ReplaceOp struct {
- BaseRewriteOperation
- LastIndex int
-}
-
-func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
- return &ReplaceOp{
- BaseRewriteOperation: BaseRewriteOperation{
- index: from,
- text: text,
- op_name: "ReplaceOp",
- tokens: stream,
- },
- LastIndex: to,
- }
-}
-
-func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
- if op.text != "" {
- buffer.WriteString(op.text)
- }
- return op.LastIndex + 1
-}
-
-func (op *ReplaceOp) String() string {
- if op.text == "" {
-		return fmt.Sprintf("<DeleteOp@%v..%v>",
-			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
-	}
-	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
-		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
-}
-
-type TokenStreamRewriter struct {
- //Our source stream
- tokens TokenStream
- // You may have multiple, named streams of rewrite operations.
- // I'm calling these things "programs."
- // Maps String (name) → rewrite (List)
- programs map[string][]RewriteOperation
- last_rewrite_token_indexes map[string]int
-}
-
-func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
- return &TokenStreamRewriter{
- tokens: tokens,
- programs: map[string][]RewriteOperation{
- Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
- },
- last_rewrite_token_indexes: map[string]int{},
- }
-}
-
-func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
- return tsr.tokens
-}
-
-// Rollback the instruction stream for a program so that
-// the indicated instruction (via instructionIndex) is no
-// longer in the stream. UNTESTED!
-func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
- is, ok := tsr.programs[program_name]
- if ok {
- tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
- }
-}
-
-func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
- tsr.Rollback(Default_Program_Name, instruction_index)
-}
-
-// Reset the program so that no instructions exist
-func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
- tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
-}
-
-func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
- tsr.DeleteProgram(Default_Program_Name)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
- // to insert after, just insert before next index (even if past end)
- var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
- tsr.InsertAfter(Default_Program_Name, index, text)
-}
-
-func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
- tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
- var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
- tsr.InsertBefore(Default_Program_Name, index, text)
-}
-
-func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
- tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string) {
- if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
- panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
- from, to, tsr.tokens.Size()))
- }
- var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
- rewrites := tsr.GetProgram(program_name)
- op.SetInstructionIndex(len(rewrites))
- tsr.AddToProgram(program_name, op)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
- tsr.Replace(Default_Program_Name, from, to, text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
- tsr.ReplaceDefault(index, index, text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) {
- tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
- tsr.ReplaceToken(Default_Program_Name, from, to, text)
-}
-
-func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
- tsr.ReplaceTokenDefault(index, index, text)
-}
-
-func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) {
- tsr.Replace(program_name, from, to, "")
-}
-
-func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
- tsr.Delete(Default_Program_Name, from, to)
-}
-
-func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
- tsr.DeleteDefault(index, index)
-}
-
-func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) {
- tsr.ReplaceToken(program_name, from, to, "")
-}
-
-func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
- tsr.DeleteToken(Default_Program_Name, from, to)
-}
-
-func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int {
- i, ok := tsr.last_rewrite_token_indexes[program_name]
- if !ok {
- return -1
- }
- return i
-}
-
-func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
- return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
-}
-
-func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) {
- tsr.last_rewrite_token_indexes[program_name] = i
-}
-
-func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
- is := make([]RewriteOperation, 0, Program_Init_Size)
- tsr.programs[name] = is
- return is
-}
-
-func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
- is := tsr.GetProgram(name)
- is = append(is, op)
- tsr.programs[name] = is
-}
-
-func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
- is, ok := tsr.programs[name]
- if !ok {
- is = tsr.InitializeProgram(name)
- }
- return is
-}
-
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter) GetTextDefault() string {
- return tsr.GetText(
- Default_Program_Name,
- NewInterval(0, tsr.tokens.Size()-1))
-}
-
-// Return the text from the original tokens altered per the
-// instructions given to this rewriter.
-func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string {
- rewrites := tsr.programs[program_name]
- start := interval.Start
- stop := interval.Stop
- // ensure start/end are in range
- stop = min(stop, tsr.tokens.Size()-1)
- start = max(start, 0)
- if rewrites == nil || len(rewrites) == 0 {
- return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
- }
- buf := bytes.Buffer{}
- // First, optimize instruction stream
- indexToOp := reduceToSingleOperationPerIndex(rewrites)
- // Walk buffer, executing instructions and emitting tokens
- for i := start; i <= stop && i < tsr.tokens.Size(); {
- op := indexToOp[i]
- delete(indexToOp, i) // remove so any left have index size-1
- t := tsr.tokens.Get(i)
- if op == nil {
- // no operation at that index, just dump token
- if t.GetTokenType() != TokenEOF {
- buf.WriteString(t.GetText())
- }
- i++ // move to next token
- } else {
- i = op.Execute(&buf) // execute operation and skip
- }
- }
- // include stuff after end if it's last index in buffer
- // So, if they did an insertAfter(lastValidIndex, "foo"), include
- // foo if end==lastValidIndex.
- if stop == tsr.tokens.Size()-1 {
- // Scan any remaining operations after last token
- // should be included (they will be inserts).
- for _, op := range indexToOp {
- if op.GetIndex() >= tsr.tokens.Size()-1 {
- buf.WriteString(op.GetText())
- }
- }
- }
- return buf.String()
-}
-
-// We need to combine operations and report invalid operations (like
-// overlapping replaces that are not completely nested). Inserts to
-// same index need to be combined etc... Here are the cases:
-//
-// I.i.u I.j.v leave alone, nonoverlapping
-// I.i.u I.i.v combine: Iivu
-//
-// R.i-j.u R.x-y.v | i-j in x-y delete first R
-// R.i-j.u R.i-j.v delete first R
-// R.i-j.u R.x-y.v | x-y in i-j ERROR
-// R.i-j.u R.x-y.v | boundaries overlap ERROR
-//
-// Delete special case of replace (text==null):
-// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
-//
-// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
-// we're not deleting i)
-// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
-// R.x-y.v I.i.u | i in x-y ERROR
-// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
-// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
-//
-// I.i.u = insert u before op @ index i
-// R.x-y.u = replace x-y indexed tokens with u
-//
-// First we need to examine replaces. For any replace op:
-//
-// 1. wipe out any insertions before op within that range.
-// 2. Drop any replace op before that is contained completely within
-// that range.
-// 3. Throw exception upon boundary overlap with any previous replace.
-//
-// Then we can deal with inserts:
-//
-// 1. for any inserts to same index, combine even if not adjacent.
-// 2. for any prior replace with same left boundary, combine this
-// insert with replace and delete this replace.
-// 3. throw exception if index in same range as previous replace
-//
-// Don't actually delete; make op null in list. Easier to walk list.
-// Later we can throw as we add to index → op map.
-//
-// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
-// inserted stuff would be before the replace range. But, if you
-// add tokens in front of a method body '{' and then delete the method
-// body, I think the stuff before the '{' you added should disappear too.
-//
-// Return a map from token index to operation.
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
- // WALK REPLACES
- for i := 0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil {
- continue
- }
- rop, ok := op.(*ReplaceOp)
- if !ok {
- continue
- }
- // Wipe prior inserts within range
- for j := 0; j < i && j < len(rewrites); j++ {
- if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
- if iop.index == rop.index {
- // E.g., insert before 2, delete 2..2; update replace
- // text to include insert before, kill insert
- rewrites[iop.instruction_index] = nil
- if rop.text != "" {
- rop.text = iop.text + rop.text
- } else {
- rop.text = iop.text
- }
- } else if iop.index > rop.index && iop.index <= rop.LastIndex {
- // delete insert as it's a no-op.
- rewrites[iop.instruction_index] = nil
- }
- }
- }
- // Drop any prior replaces contained within
- for j := 0; j < i && j < len(rewrites); j++ {
- if prevop, ok := rewrites[j].(*ReplaceOp); ok {
- if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
- // delete replace as it's a no-op.
- rewrites[prevop.instruction_index] = nil
- continue
- }
- // throw exception unless disjoint or identical
- disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
- // Delete special case of replace (text==null):
- // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- if prevop.text == "" && rop.text == "" && !disjoint {
- rewrites[prevop.instruction_index] = nil
- rop.index = min(prevop.index, rop.index)
- rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
- println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
- } else if !disjoint {
- panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
- }
- }
- }
- }
- // WALK INSERTS
- for i := 0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil {
- continue
- }
- //hack to replicate inheritance in composition
- _, iok := rewrites[i].(*InsertBeforeOp)
- _, aok := rewrites[i].(*InsertAfterOp)
- if !iok && !aok {
- continue
- }
- iop := rewrites[i]
- // combine current insert with prior if any at same index
- // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
- for j := 0; j < i && j < len(rewrites); j++ {
- if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
- if nextIop.index == iop.GetIndex() {
- iop.SetText(nextIop.text + iop.GetText())
- rewrites[j] = nil
- }
- }
- if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
- if prevIop.index == iop.GetIndex() {
- iop.SetText(iop.GetText() + prevIop.text)
- rewrites[prevIop.instruction_index] = nil
- }
- }
- }
- // look for replaces where iop.index is in range; error
- for j := 0; j < i && j < len(rewrites); j++ {
- if rop, ok := rewrites[j].(*ReplaceOp); ok {
- if iop.GetIndex() == rop.index {
- rop.text = iop.GetText() + rop.text
- rewrites[i] = nil
- continue
- }
- if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
- panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
- }
- }
- }
- }
- m := map[int]RewriteOperation{}
- for i := 0; i < len(rewrites); i++ {
- op := rewrites[i]
- if op == nil {
- continue
- }
- if _, ok := m[op.GetIndex()]; ok {
- panic("should only be one op per index")
- }
- m[op.GetIndex()] = op
- }
- return m
-}
-
-/*
- Quick fixing Go lack of overloads
-*/
-
-func max(a, b int) int {
- if a > b {
- return a
- } else {
- return b
- }
-}
-func min(a, b int) int {
- if a < b {
- return a
- } else {
- return b
- }
-}
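
The rewriter deleted above never mutates the token buffer; it queues instructions per program name and applies them only when GetText renders the stream. A self-contained sketch of that lazy-rewrite idea over a plain token slice, assuming at most one operation per index (in the real code, reduceToSingleOperationPerIndex enforces this):

```go
package main

import (
	"fmt"
	"strings"
)

// op is a queued rewrite instruction; the token buffer itself is never mutated.
type op struct {
	index, last int    // token range the op covers
	text        string // insertion or replacement text
	insert      bool   // true: insert before index; false: replace index..last
}

// render walks the buffer, consulting at most one op per index,
// mirroring the shape of TokenStreamRewriter.GetText.
func render(tokens []string, ops []op) string {
	byIndex := map[int]op{}
	for _, o := range ops {
		byIndex[o.index] = o // simplification: one op per index assumed
	}
	var b strings.Builder
	for i := 0; i < len(tokens); {
		if o, ok := byIndex[i]; ok {
			b.WriteString(o.text)
			if o.insert {
				b.WriteString(tokens[i])
				i++
			} else {
				i = o.last + 1 // skip the replaced range
			}
		} else {
			b.WriteString(tokens[i])
			i++
		}
	}
	return b.String()
}

func main() {
	tokens := []string{"x", " = ", "1", ";"}
	ops := []op{
		{index: 0, text: "int ", insert: true}, // insert before token 0
		{index: 2, last: 2, text: "42"},        // replace token 2
	}
	fmt.Println(render(tokens, ops))      // int x = 42;
	fmt.Println(strings.Join(tokens, "")) // original intact: x = 1;
}
```
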
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
deleted file mode 100644
index 36be4f733..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go
+++ /dev/null
@@ -1,428 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// atom, set, epsilon, action, predicate, rule transitions.
-//
-// This is a one way link. It emanates from a state (usually via a list of
-// transitions) and has a target state.
-//
-// Since we never have to change the ATN transitions once we construct it,
-// we can fix these transitions as specific classes. The DFA transitions, on
-// the other hand, need to update their labels as transitions are added to the
-// states. We'll use the term Edge for the DFA to distinguish them from
-// ATN transitions.
-
-type Transition interface {
- getTarget() ATNState
- setTarget(ATNState)
- getIsEpsilon() bool
- getLabel() *IntervalSet
- getSerializationType() int
- Matches(int, int, int) bool
-}
-
-type BaseTransition struct {
- target ATNState
- isEpsilon bool
- label int
- intervalSet *IntervalSet
- serializationType int
-}
-
-func NewBaseTransition(target ATNState) *BaseTransition {
-
- if target == nil {
- panic("target cannot be nil.")
- }
-
- t := new(BaseTransition)
-
- t.target = target
- // Are we epsilon, action, sempred?
- t.isEpsilon = false
- t.intervalSet = nil
-
- return t
-}
-
-func (t *BaseTransition) getTarget() ATNState {
- return t.target
-}
-
-func (t *BaseTransition) setTarget(s ATNState) {
- t.target = s
-}
-
-func (t *BaseTransition) getIsEpsilon() bool {
- return t.isEpsilon
-}
-
-func (t *BaseTransition) getLabel() *IntervalSet {
- return t.intervalSet
-}
-
-func (t *BaseTransition) getSerializationType() int {
- return t.serializationType
-}
-
-func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- panic("Not implemented")
-}
-
-const (
- TransitionEPSILON = 1
- TransitionRANGE = 2
- TransitionRULE = 3
- TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
- TransitionATOM = 5
- TransitionACTION = 6
- TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
- TransitionNOTSET = 8
- TransitionWILDCARD = 9
- TransitionPRECEDENCE = 10
-)
-
-var TransitionserializationNames = []string{
- "INVALID",
- "EPSILON",
- "RANGE",
- "RULE",
- "PREDICATE",
- "ATOM",
- "ACTION",
- "SET",
- "NOT_SET",
- "WILDCARD",
- "PRECEDENCE",
-}
-
-//var TransitionserializationTypes struct {
-// EpsilonTransition int
-// RangeTransition int
-// RuleTransition int
-// PredicateTransition int
-// AtomTransition int
-// ActionTransition int
-// SetTransition int
-// NotSetTransition int
-// WildcardTransition int
-// PrecedencePredicateTransition int
-//}{
-// TransitionEPSILON,
-// TransitionRANGE,
-// TransitionRULE,
-// TransitionPREDICATE,
-// TransitionATOM,
-// TransitionACTION,
-// TransitionSET,
-// TransitionNOTSET,
-// TransitionWILDCARD,
-// TransitionPRECEDENCE
-//}
-
-// TODO: make all transitions sets? no, should remove set edges
-type AtomTransition struct {
- *BaseTransition
-}
-
-func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
-
- t := new(AtomTransition)
- t.BaseTransition = NewBaseTransition(target)
-
-	t.label = intervalSet // The token type or character value; or, signifies a special intervalSet.
- t.intervalSet = t.makeLabel()
- t.serializationType = TransitionATOM
-
- return t
-}
-
-func (t *AtomTransition) makeLabel() *IntervalSet {
- s := NewIntervalSet()
- s.addOne(t.label)
- return s
-}
-
-func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return t.label == symbol
-}
-
-func (t *AtomTransition) String() string {
- return strconv.Itoa(t.label)
-}
-
-type RuleTransition struct {
- *BaseTransition
-
- followState ATNState
- ruleIndex, precedence int
-}
-
-func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
-
- t := new(RuleTransition)
- t.BaseTransition = NewBaseTransition(ruleStart)
-
- t.ruleIndex = ruleIndex
- t.precedence = precedence
- t.followState = followState
- t.serializationType = TransitionRULE
- t.isEpsilon = true
-
- return t
-}
-
-func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-type EpsilonTransition struct {
- *BaseTransition
-
- outermostPrecedenceReturn int
-}
-
-func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
-
- t := new(EpsilonTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionEPSILON
- t.isEpsilon = true
- t.outermostPrecedenceReturn = outermostPrecedenceReturn
- return t
-}
-
-func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *EpsilonTransition) String() string {
- return "epsilon"
-}
-
-type RangeTransition struct {
- *BaseTransition
-
- start, stop int
-}
-
-func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
-
- t := new(RangeTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionRANGE
- t.start = start
- t.stop = stop
- t.intervalSet = t.makeLabel()
- return t
-}
-
-func (t *RangeTransition) makeLabel() *IntervalSet {
- s := NewIntervalSet()
- s.addRange(t.start, t.stop)
- return s
-}
-
-func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= t.start && symbol <= t.stop
-}
-
-func (t *RangeTransition) String() string {
- var sb strings.Builder
- sb.WriteByte('\'')
- sb.WriteRune(rune(t.start))
- sb.WriteString("'..'")
- sb.WriteRune(rune(t.stop))
- sb.WriteByte('\'')
- return sb.String()
-}
-
-type AbstractPredicateTransition interface {
- Transition
- IAbstractPredicateTransitionFoo()
-}
-
-type BaseAbstractPredicateTransition struct {
- *BaseTransition
-}
-
-func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
-
- t := new(BaseAbstractPredicateTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- return t
-}
-
-func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
-
-type PredicateTransition struct {
- *BaseAbstractPredicateTransition
-
- isCtxDependent bool
- ruleIndex, predIndex int
-}
-
-func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
-
- t := new(PredicateTransition)
- t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
- t.serializationType = TransitionPREDICATE
- t.ruleIndex = ruleIndex
- t.predIndex = predIndex
- t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- t.isEpsilon = true
- return t
-}
-
-func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *PredicateTransition) getPredicate() *Predicate {
- return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
-}
-
-func (t *PredicateTransition) String() string {
- return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
-}
-
-type ActionTransition struct {
- *BaseTransition
-
- isCtxDependent bool
- ruleIndex, actionIndex, predIndex int
-}
-
-func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
-
- t := new(ActionTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionACTION
- t.ruleIndex = ruleIndex
- t.actionIndex = actionIndex
- t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
- t.isEpsilon = true
- return t
-}
-
-func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *ActionTransition) String() string {
- return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
-}
-
-type SetTransition struct {
- *BaseTransition
-}
-
-func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
-
- t := new(SetTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionSET
- if set != nil {
- t.intervalSet = set
- } else {
- t.intervalSet = NewIntervalSet()
- t.intervalSet.addOne(TokenInvalidType)
- }
-
- return t
-}
-
-func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return t.intervalSet.contains(symbol)
-}
-
-func (t *SetTransition) String() string {
- return t.intervalSet.String()
-}
-
-type NotSetTransition struct {
- *SetTransition
-}
-
-func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
-
- t := new(NotSetTransition)
-
- t.SetTransition = NewSetTransition(target, set)
-
- t.serializationType = TransitionNOTSET
-
- return t
-}
-
-func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
-}
-
-func (t *NotSetTransition) String() string {
- return "~" + t.intervalSet.String()
-}
-
-type WildcardTransition struct {
- *BaseTransition
-}
-
-func NewWildcardTransition(target ATNState) *WildcardTransition {
-
- t := new(WildcardTransition)
- t.BaseTransition = NewBaseTransition(target)
-
- t.serializationType = TransitionWILDCARD
- return t
-}
-
-func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
-}
-
-func (t *WildcardTransition) String() string {
- return "."
-}
-
-type PrecedencePredicateTransition struct {
- *BaseAbstractPredicateTransition
-
- precedence int
-}
-
-func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
-
- t := new(PrecedencePredicateTransition)
- t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
-
- t.serializationType = TransitionPRECEDENCE
- t.precedence = precedence
- t.isEpsilon = true
-
- return t
-}
-
-func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
- return false
-}
-
-func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
- return NewPrecedencePredicate(t.precedence)
-}
-
-func (t *PrecedencePredicateTransition) String() string {
- return fmt.Sprint(t.precedence) + " >= _p"
-}
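
Each concrete transition deleted above implements Matches differently: an atom matches exactly one symbol, a range matches an interval, a set matches membership, and epsilon or predicate transitions never consume input. A small sketch of that dispatch with stand-in types (not the runtime's ATN types):

```go
package main

import "fmt"

// transition is a stand-in for the Transition interface: it only
// captures the Matches behaviour that differs per concrete type.
type transition interface {
	matches(symbol int) bool
}

type atom struct{ label int }
type span struct{ start, stop int } // "range" is a Go keyword, so span
type epsilon struct{}

func (a atom) matches(s int) bool { return s == a.label }
func (r span) matches(s int) bool { return s >= r.start && s <= r.stop }
func (epsilon) matches(int) bool  { return false } // epsilon never consumes input

func main() {
	edges := []transition{atom{label: 'a'}, span{start: '0', stop: '9'}, epsilon{}}
	for _, e := range edges {
		fmt.Printf("%T matches '7': %v\n", e, e.matches('7'))
	}
}
```
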
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
deleted file mode 100644
index 85b4f137b..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-// The basic notion of a tree has a parent, a payload, and a list of children.
-// It is the most abstract interface for all the trees used by ANTLR.
-///
-
-var TreeInvalidInterval = NewInterval(-1, -2)
-
-type Tree interface {
- GetParent() Tree
- SetParent(Tree)
- GetPayload() interface{}
- GetChild(i int) Tree
- GetChildCount() int
- GetChildren() []Tree
-}
-
-type SyntaxTree interface {
- Tree
-
- GetSourceInterval() *Interval
-}
-
-type ParseTree interface {
- SyntaxTree
-
- Accept(Visitor ParseTreeVisitor) interface{}
- GetText() string
-
- ToStringTree([]string, Recognizer) string
-}
-
-type RuleNode interface {
- ParseTree
-
- GetRuleContext() RuleContext
- GetBaseRuleContext() *BaseRuleContext
-}
-
-type TerminalNode interface {
- ParseTree
-
- GetSymbol() Token
-}
-
-type ErrorNode interface {
- TerminalNode
-
- errorNode()
-}
-
-type ParseTreeVisitor interface {
- Visit(tree ParseTree) interface{}
- VisitChildren(node RuleNode) interface{}
- VisitTerminal(node TerminalNode) interface{}
- VisitErrorNode(node ErrorNode) interface{}
-}
-
-type BaseParseTreeVisitor struct{}
-
-var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
-
-func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
-func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
-func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
-
-// TODO
-//func (this ParseTreeVisitor) Visit(ctx) {
-// if (Utils.isArray(ctx)) {
-// self := this
-// return ctx.map(function(child) { return VisitAtom(self, child)})
-// } else {
-// return VisitAtom(this, ctx)
-// }
-//}
-//
-//func VisitAtom(Visitor, ctx) {
-// if (ctx.parser == nil) { //is terminal
-// return
-// }
-//
-// name := ctx.parser.ruleNames[ctx.ruleIndex]
-// funcName := "Visit" + Utils.titleCase(name)
-//
-// return Visitor[funcName](ctx)
-//}
-
-type ParseTreeListener interface {
- VisitTerminal(node TerminalNode)
- VisitErrorNode(node ErrorNode)
- EnterEveryRule(ctx ParserRuleContext)
- ExitEveryRule(ctx ParserRuleContext)
-}
-
-type BaseParseTreeListener struct{}
-
-var _ ParseTreeListener = &BaseParseTreeListener{}
-
-func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
-func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
-
-type TerminalNodeImpl struct {
- parentCtx RuleContext
-
- symbol Token
-}
-
-var _ TerminalNode = &TerminalNodeImpl{}
-
-func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
- tn := new(TerminalNodeImpl)
-
- tn.parentCtx = nil
- tn.symbol = symbol
-
- return tn
-}
-
-func (t *TerminalNodeImpl) GetChild(i int) Tree {
- return nil
-}
-
-func (t *TerminalNodeImpl) GetChildren() []Tree {
- return nil
-}
-
-func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
- panic("Cannot set children on terminal node")
-}
-
-func (t *TerminalNodeImpl) GetSymbol() Token {
- return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetParent() Tree {
- return t.parentCtx
-}
-
-func (t *TerminalNodeImpl) SetParent(tree Tree) {
- t.parentCtx = tree.(RuleContext)
-}
-
-func (t *TerminalNodeImpl) GetPayload() interface{} {
- return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
- if t.symbol == nil {
- return TreeInvalidInterval
- }
- tokenIndex := t.symbol.GetTokenIndex()
- return NewInterval(tokenIndex, tokenIndex)
-}
-
-func (t *TerminalNodeImpl) GetChildCount() int {
- return 0
-}
-
-func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
- return v.VisitTerminal(t)
-}
-
-func (t *TerminalNodeImpl) GetText() string {
- return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) String() string {
- if t.symbol.GetTokenType() == TokenEOF {
-		return "<EOF>"
- }
-
- return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
- return t.String()
-}
-
-// Represents a token that was consumed during reSynchronization
-// rather than during a valid Match operation. For example,
-// we will create this kind of a node during single token insertion
-// and deletion as well as during "consume until error recovery set"
-// upon no viable alternative exceptions.
-
-type ErrorNodeImpl struct {
- *TerminalNodeImpl
-}
-
-var _ ErrorNode = &ErrorNodeImpl{}
-
-func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
- en := new(ErrorNodeImpl)
- en.TerminalNodeImpl = NewTerminalNodeImpl(token)
- return en
-}
-
-func (e *ErrorNodeImpl) errorNode() {}
-
-func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
- return v.VisitErrorNode(e)
-}
-
-type ParseTreeWalker struct {
-}
-
-func NewParseTreeWalker() *ParseTreeWalker {
- return new(ParseTreeWalker)
-}
-
-// Performs a walk on the given parse tree starting at the root and going down recursively
-// with depth-first search. On each node, EnterRule is called before
-// recursively walking down into child nodes, then
-// ExitRule is called after the recursive call to wind up.
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
- switch tt := t.(type) {
- case ErrorNode:
- listener.VisitErrorNode(tt)
- case TerminalNode:
- listener.VisitTerminal(tt)
- default:
- p.EnterRule(listener, t.(RuleNode))
- for i := 0; i < t.GetChildCount(); i++ {
- child := t.GetChild(i)
- p.Walk(listener, child)
- }
- p.ExitRule(listener, t.(RuleNode))
- }
-}
-
-// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
-// then by triggering the event specific to the given parse tree node
-func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
- ctx := r.GetRuleContext().(ParserRuleContext)
- listener.EnterEveryRule(ctx)
- ctx.EnterRule(listener)
-}
-
-// Exits a grammar rule by first triggering the event specific to the given parse tree node
-// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
-func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
- ctx := r.GetRuleContext().(ParserRuleContext)
- ctx.ExitRule(listener)
- listener.ExitEveryRule(ctx)
-}
-
-var ParseTreeWalkerDefault = NewParseTreeWalker()
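
Walk, deleted above, is a depth-first traversal that fires EnterRule before descending into children and ExitRule afterwards, with terminal and error nodes visited instead of entered. A minimal stand-alone sketch of the same listener pattern, using illustrative types rather than the runtime's:

```go
package main

import "fmt"

type node struct {
	name     string
	children []*node
}

type listener interface {
	enter(n *node)
	exit(n *node)
}

// walk mirrors ParseTreeWalker.Walk: enter, recurse into children, exit.
func walk(l listener, n *node) {
	l.enter(n)
	for _, c := range n.children {
		walk(l, c)
	}
	l.exit(n)
}

// printer indents output to show the nesting of enter/exit events.
type printer struct{ depth int }

func (p *printer) enter(n *node) {
	fmt.Printf("%*senter %s\n", p.depth*2, "", n.name)
	p.depth++
}

func (p *printer) exit(n *node) {
	p.depth--
	fmt.Printf("%*sexit  %s\n", p.depth*2, "", n.name)
}

func main() {
	root := &node{name: "expr", children: []*node{{name: "term"}, {name: "term"}}}
	walk(&printer{}, root)
}
```
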
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
deleted file mode 100644
index 9fad5d916..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-// Use of this file is governed by the BSD 3-clause license that
-// can be found in the LICENSE.txt file in the project root.
-
-package antlr
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/bits"
- "strconv"
- "strings"
-)
-
-func intMin(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func intMax(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-// A simple integer stack
-
-type IntStack []int
-
-var ErrEmptyStack = errors.New("Stack is empty")
-
-func (s *IntStack) Pop() (int, error) {
- l := len(*s) - 1
- if l < 0 {
- return 0, ErrEmptyStack
- }
- v := (*s)[l]
- *s = (*s)[0:l]
- return v, nil
-}
-
-func (s *IntStack) Push(e int) {
- *s = append(*s, e)
-}
-
-type comparable interface {
- Equals(other Collectable[any]) bool
-}
-
-func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool {
-
- return a.Equals(b)
-}
-
-func standardHashFunction(a interface{}) int {
- if h, ok := a.(hasher); ok {
- return h.Hash()
- }
-
- panic("Not Hasher")
-}
-
-type hasher interface {
- Hash() int
-}
-
-const bitsPerWord = 64
-
-func indexForBit(bit int) int {
- return bit / bitsPerWord
-}
-
-func wordForBit(data []uint64, bit int) uint64 {
- idx := indexForBit(bit)
- if idx >= len(data) {
- return 0
- }
- return data[idx]
-}
-
-func maskForBit(bit int) uint64 {
- return uint64(1) << (bit % bitsPerWord)
-}
-
-func wordsNeeded(bit int) int {
- return indexForBit(bit) + 1
-}
-
-type BitSet struct {
- data []uint64
-}
-
-func NewBitSet() *BitSet {
- return &BitSet{}
-}
-
-func (b *BitSet) add(value int) {
- idx := indexForBit(value)
- if idx >= len(b.data) {
- size := wordsNeeded(value)
- data := make([]uint64, size)
- copy(data, b.data)
- b.data = data
- }
- b.data[idx] |= maskForBit(value)
-}
-
-func (b *BitSet) clear(index int) {
- idx := indexForBit(index)
- if idx >= len(b.data) {
- return
- }
- b.data[idx] &= ^maskForBit(index)
-}
-
-func (b *BitSet) or(set *BitSet) {
- // Get min size necessary to represent the bits in both sets.
- bLen := b.minLen()
- setLen := set.minLen()
- maxLen := intMax(bLen, setLen)
- if maxLen > len(b.data) {
-		// Increase the size of b.data to represent the bits in both sets.
- data := make([]uint64, maxLen)
- copy(data, b.data)
- b.data = data
- }
- // len(b.data) is at least setLen.
- for i := 0; i < setLen; i++ {
- b.data[i] |= set.data[i]
- }
-}
-
-func (b *BitSet) remove(value int) {
- b.clear(value)
-}
-
-func (b *BitSet) contains(value int) bool {
- idx := indexForBit(value)
- if idx >= len(b.data) {
- return false
- }
- return (b.data[idx] & maskForBit(value)) != 0
-}
-
-func (b *BitSet) minValue() int {
- for i, v := range b.data {
- if v == 0 {
- continue
- }
- return i*bitsPerWord + bits.TrailingZeros64(v)
- }
- return 2147483647
-}
-
-func (b *BitSet) equals(other interface{}) bool {
- otherBitSet, ok := other.(*BitSet)
- if !ok {
- return false
- }
-
- if b == otherBitSet {
- return true
- }
-
-	// We only compare set bits, so we cannot rely on the two slices having the same size. It's
-	// possible for two BitSets to have different slice lengths but the same set bits. So we only
-	// compare the relevant words and ignore the trailing zeros.
- bLen := b.minLen()
- otherLen := otherBitSet.minLen()
-
- if bLen != otherLen {
- return false
- }
-
- for i := 0; i < bLen; i++ {
- if b.data[i] != otherBitSet.data[i] {
- return false
- }
- }
-
- return true
-}
-
-func (b *BitSet) minLen() int {
- for i := len(b.data); i > 0; i-- {
- if b.data[i-1] != 0 {
- return i
- }
- }
- return 0
-}
-
-func (b *BitSet) length() int {
- cnt := 0
- for _, val := range b.data {
- cnt += bits.OnesCount64(val)
- }
- return cnt
-}
-
-func (b *BitSet) String() string {
- vals := make([]string, 0, b.length())
-
- for i, v := range b.data {
- for v != 0 {
- n := bits.TrailingZeros64(v)
- vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
- v &= ^(uint64(1) << n)
- }
- }
-
- return "{" + strings.Join(vals, ", ") + "}"
-}
-
-type AltDict struct {
- data map[string]interface{}
-}
-
-func NewAltDict() *AltDict {
- d := new(AltDict)
- d.data = make(map[string]interface{})
- return d
-}
-
-func (a *AltDict) Get(key string) interface{} {
- key = "k-" + key
- return a.data[key]
-}
-
-func (a *AltDict) put(key string, value interface{}) {
- key = "k-" + key
- a.data[key] = value
-}
-
-func (a *AltDict) values() []interface{} {
- vs := make([]interface{}, len(a.data))
- i := 0
- for _, v := range a.data {
- vs[i] = v
- i++
- }
- return vs
-}
-
-type DoubleDict struct {
- data map[int]map[int]interface{}
-}
-
-func NewDoubleDict() *DoubleDict {
- dd := new(DoubleDict)
- dd.data = make(map[int]map[int]interface{})
- return dd
-}
-
-func (d *DoubleDict) Get(a, b int) interface{} {
- data := d.data[a]
-
- if data == nil {
- return nil
- }
-
- return data[b]
-}
-
-func (d *DoubleDict) set(a, b int, o interface{}) {
- data := d.data[a]
-
- if data == nil {
- data = make(map[int]interface{})
- d.data[a] = data
- }
-
- data[b] = o
-}
-
-func EscapeWhitespace(s string, escapeSpaces bool) string {
-
- s = strings.Replace(s, "\t", "\\t", -1)
- s = strings.Replace(s, "\n", "\\n", -1)
- s = strings.Replace(s, "\r", "\\r", -1)
- if escapeSpaces {
- s = strings.Replace(s, " ", "\u00B7", -1)
- }
- return s
-}
-
-func TerminalNodeToStringArray(sa []TerminalNode) []string {
- st := make([]string, len(sa))
-
- for i, s := range sa {
- st[i] = fmt.Sprintf("%v", s)
- }
-
- return st
-}
-
-func PrintArrayJavaStyle(sa []string) string {
- var buffer bytes.Buffer
-
- buffer.WriteString("[")
-
- for i, s := range sa {
- buffer.WriteString(s)
- if i != len(sa)-1 {
- buffer.WriteString(", ")
- }
- }
-
- buffer.WriteString("]")
-
- return buffer.String()
-}
-
-// murmur hash
-func murmurInit(seed int) int {
- return seed
-}
-
-func murmurUpdate(h int, value int) int {
- const c1 uint32 = 0xCC9E2D51
- const c2 uint32 = 0x1B873593
- const r1 uint32 = 15
- const r2 uint32 = 13
- const m uint32 = 5
- const n uint32 = 0xE6546B64
-
- k := uint32(value)
- k *= c1
- k = (k << r1) | (k >> (32 - r1))
- k *= c2
-
- hash := uint32(h) ^ k
- hash = (hash << r2) | (hash >> (32 - r2))
- hash = hash*m + n
- return int(hash)
-}
-
-func murmurFinish(h int, numberOfWords int) int {
- var hash = uint32(h)
- hash ^= uint32(numberOfWords) << 2
- hash ^= hash >> 16
- hash *= 0x85ebca6b
- hash ^= hash >> 13
- hash *= 0xc2b2ae35
- hash ^= hash >> 16
-
- return int(hash)
-}
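
The BitSet deleted above packs bits into 64-bit words: the word index is bit/64, the mask is 1 << (bit%64), and minValue scans for the first non-zero word and counts its trailing zeros. A quick standalone check of that arithmetic:

```go
package main

import (
	"fmt"
	"math/bits"
)

const bitsPerWord = 64

func main() {
	var data []uint64
	add := func(bit int) {
		idx := bit / bitsPerWord
		for idx >= len(data) { // grow to cover the word holding this bit
			data = append(data, 0)
		}
		data[idx] |= 1 << (bit % bitsPerWord)
	}
	add(3)
	add(130) // lands in word 2, since 130/64 == 2

	// minValue: first non-zero word, then its lowest set bit.
	for i, w := range data {
		if w != 0 {
			fmt.Println("min set bit:", i*bitsPerWord+bits.TrailingZeros64(w)) // 3
			break
		}
	}
	fmt.Println("words used:", len(data)) // 3
}
```
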
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
deleted file mode 100644
index c9bd6751e..000000000
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package antlr
-
-import "math"
-
-const (
- _initalCapacity = 16
- _initalBucketCapacity = 8
- _loadFactor = 0.75
-)
-
-type Set interface {
- Add(value interface{}) (added interface{})
- Len() int
- Get(value interface{}) (found interface{})
- Contains(value interface{}) bool
- Values() []interface{}
- Each(f func(interface{}) bool)
-}
-
-type array2DHashSet struct {
- buckets [][]Collectable[any]
- hashcodeFunction func(interface{}) int
- equalsFunction func(Collectable[any], Collectable[any]) bool
-
- n int // How many elements in set
- threshold int // when to expand
-
- currentPrime int // jump by 4 primes each expand or whatever
- initialBucketCapacity int
-}
-
-func (as *array2DHashSet) Each(f func(interface{}) bool) {
- if as.Len() < 1 {
- return
- }
-
- for _, bucket := range as.buckets {
- for _, o := range bucket {
- if o == nil {
- break
- }
- if !f(o) {
- return
- }
- }
- }
-}
-
-func (as *array2DHashSet) Values() []interface{} {
- if as.Len() < 1 {
- return nil
- }
-
- values := make([]interface{}, 0, as.Len())
- as.Each(func(i interface{}) bool {
- values = append(values, i)
- return true
- })
- return values
-}
-
-func (as *array2DHashSet) Contains(value Collectable[any]) bool {
- return as.Get(value) != nil
-}
-
-func (as *array2DHashSet) Add(value Collectable[any]) interface{} {
- if as.n > as.threshold {
- as.expand()
- }
- return as.innerAdd(value)
-}
-
-func (as *array2DHashSet) expand() {
- old := as.buckets
-
- as.currentPrime += 4
-
- var (
- newCapacity = len(as.buckets) << 1
- newTable = as.createBuckets(newCapacity)
- newBucketLengths = make([]int, len(newTable))
- )
-
- as.buckets = newTable
- as.threshold = int(float64(newCapacity) * _loadFactor)
-
- for _, bucket := range old {
- if bucket == nil {
- continue
- }
-
- for _, o := range bucket {
- if o == nil {
- break
- }
-
- b := as.getBuckets(o)
- bucketLength := newBucketLengths[b]
- var newBucket []Collectable[any]
- if bucketLength == 0 {
- // new bucket
- newBucket = as.createBucket(as.initialBucketCapacity)
- newTable[b] = newBucket
- } else {
- newBucket = newTable[b]
- if bucketLength == len(newBucket) {
- // expand
- newBucketCopy := make([]Collectable[any], len(newBucket)<<1)
- copy(newBucketCopy[:bucketLength], newBucket)
- newBucket = newBucketCopy
- newTable[b] = newBucket
- }
- }
-
- newBucket[bucketLength] = o
- newBucketLengths[b]++
- }
- }
-}
-
-func (as *array2DHashSet) Len() int {
- return as.n
-}
-
-func (as *array2DHashSet) Get(o Collectable[any]) interface{} {
- if o == nil {
- return nil
- }
-
- b := as.getBuckets(o)
- bucket := as.buckets[b]
- if bucket == nil { // no bucket
- return nil
- }
-
- for _, e := range bucket {
- if e == nil {
- return nil // empty slot; not there
- }
- if as.equalsFunction(e, o) {
- return e
- }
- }
-
- return nil
-}
-
-func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} {
- b := as.getBuckets(o)
-
- bucket := as.buckets[b]
-
- // new bucket
- if bucket == nil {
- bucket = as.createBucket(as.initialBucketCapacity)
- bucket[0] = o
-
- as.buckets[b] = bucket
- as.n++
- return o
- }
-
- // look for it in bucket
- for i := 0; i < len(bucket); i++ {
- existing := bucket[i]
- if existing == nil { // empty slot; not there, add.
- bucket[i] = o
- as.n++
- return o
- }
-
- if as.equalsFunction(existing, o) { // found existing, quit
- return existing
- }
- }
-
- // full bucket, expand and add to end
- oldLength := len(bucket)
- bucketCopy := make([]Collectable[any], oldLength<<1)
- copy(bucketCopy[:oldLength], bucket)
- bucket = bucketCopy
- as.buckets[b] = bucket
- bucket[oldLength] = o
- as.n++
- return o
-}
-
-func (as *array2DHashSet) getBuckets(value Collectable[any]) int {
- hash := as.hashcodeFunction(value)
- return hash & (len(as.buckets) - 1)
-}
-
-func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] {
- return make([][]Collectable[any], cap)
-}
-
-func (as *array2DHashSet) createBucket(cap int) []Collectable[any] {
- return make([]Collectable[any], cap)
-}
-
-func newArray2DHashSetWithCap(
- hashcodeFunction func(interface{}) int,
- equalsFunction func(Collectable[any], Collectable[any]) bool,
- initCap int,
- initBucketCap int,
-) *array2DHashSet {
- if hashcodeFunction == nil {
- hashcodeFunction = standardHashFunction
- }
-
- if equalsFunction == nil {
- equalsFunction = standardEqualsFunction
- }
-
- ret := &array2DHashSet{
- hashcodeFunction: hashcodeFunction,
- equalsFunction: equalsFunction,
-
- n: 0,
- threshold: int(math.Floor(_initalCapacity * _loadFactor)),
-
- currentPrime: 1,
- initialBucketCapacity: initBucketCap,
- }
-
- ret.buckets = ret.createBuckets(initCap)
- return ret
-}
-
-func newArray2DHashSet(
- hashcodeFunction func(interface{}) int,
- equalsFunction func(Collectable[any], Collectable[any]) bool,
-) *array2DHashSet {
- return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
-}
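
getBuckets, deleted above, selects a bucket with hash & (len(buckets)-1), which is equivalent to hash % len(buckets) only when the bucket count is a power of two; note that expand doubles capacity with << 1 to preserve that invariant. A tiny illustration under that assumption:

```go
package main

import "fmt"

func main() {
	const nBuckets = 16 // power of two, as with the set's initial capacity
	hashes := []int{7, 23, 39, 100}
	for _, h := range hashes {
		// h & (n-1) equals h % n when n is a power of two, without a division.
		fmt.Printf("hash %3d -> bucket %d (mod check: %d)\n", h, h&(nBuckets-1), h%nBuckets)
	}
}
```
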
diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
new file mode 100644
index 000000000..38ea34ff5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore
@@ -0,0 +1,18 @@
+### Go template
+
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+
+# Go workspace file
+go.work
+
+# No Goland stuff in this repo
+.idea
diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
new file mode 100644
index 000000000..a22292eb5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+3. Neither name of copyright holders nor the names of its contributors
+may be used to endorse or promote products derived from this software
+without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md
new file mode 100644
index 000000000..03e5b83eb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/README.md
@@ -0,0 +1,54 @@
+[Go Report Card](https://goreportcard.com/report/github.com/antlr4-go/antlr)
+[Go Reference](https://pkg.go.dev/github.com/antlr4-go/antlr)
+[Latest Release](https://github.com/antlr4-go/antlr/releases/latest)
+[Maintenance](https://github.com/antlr4-go/antlr/commit-activity)
+[License: BSD-3-Clause](https://opensource.org/licenses/BSD-3-Clause)
+[Stargazers](https://GitHub.com/Naereen/StrapDown.js/stargazers/)
+# ANTLR4 Go Runtime Module Repo
+
+IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here.
+
+ - Do not submit PRs or any change requests to this repo
+ - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR
+ - This repo contains the Go runtime that your generated projects should import
+
+## Introduction
+
+This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained
+at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create
+the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here.
+
+The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
+
+### Why?
+
+The `go get` command is unable to retrieve the Go runtime when it is embedded so
+deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
+does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
+causes confusion.
+
+For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by go get as:
+
+```sh
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
+)
+```
+
+Where you would expect to see:
+
+```sh
+require (
+ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
+)
+```
+
+The decision was taken to create a separate org and repo to hold the official Go runtime for ANTLR, from which
+users can expect `go get` to behave as expected.
+
+# Documentation
+
+Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
+migrating existing projects to use the new module location and for information on how to use the Go runtime in
+general.
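
For consumers of this diff, the practical change is the module path. A minimal sketch of an import against the new path (assuming a require line for github.com/antlr4-go/antlr/v4 in go.mod; NewInputStream is one of the simplest runtime constructors to smoke-test the import with):

```go
package main

import (
	"fmt"

	// Old path: github.com/antlr/antlr4/runtime/Go/antlr/v4
	// New path, as described in the README above:
	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Any runtime symbol confirms the import resolves; an input
	// stream needs no generated lexer or parser.
	input := antlr.NewInputStream("hello")
	fmt.Println(input.Size()) // 5
}
```
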
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
new file mode 100644
index 000000000..3bb4fd7c4
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
@@ -0,0 +1,102 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Go Runtime
+
+At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
+source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
+ANTLR4 that it is compatible with (i.e. it uses the /v4 path).
+
+However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
+of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
+This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
+list a release tag such as @4.12.0, which was confusing, to say the least.
+
+As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
+(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
+which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
+
+This means that if you are using the source code without modules, you should also use the source code in the [new repo],
+though we highly recommend that you use go modules, as they are now idiomatic for Go.
+
+I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
+
+Go runtime author: [Jim Idle] jimi@idle.ws
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
+it was at any point in its history.
+
+Here is a general/recommended template for an ANTLR based recognizer in Go:
+
+ .
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.1-complete.jar
+ │ ├── generate.go
+ │ └── generate.sh
+ ├── parsing - generated code goes here
+ │ └── error_listeners.go
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+	alias antlr4='java -Xmx500M -cp "./antlr-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a package of your choosing.
+
+From the command line at the root of your source package (the location of go.mod) you can then simply issue the command:
+
+ go generate ./...
+
+This will generate the code for the parser and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go
similarity index 94%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn.go
index 98010d2e6..cdeefed24 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -20,10 +20,11 @@ var ATNInvalidAltNumber int
// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
type ATN struct {
- // DecisionToState is the decision points for all rules, subrules, optional
- // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we
+
+ // DecisionToState is the decision points for all rules, sub-rules, optional
+ // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
// can go back later and build DFA predictors for them. This includes
- // all the rules, subrules, optional blocks, ()+, ()* etc...
+ // all the rules, sub-rules, optional blocks, ()+, ()* etc...
DecisionToState []DecisionState
// grammarType is the ATN type and is used for deserializing ATNs from strings.
@@ -51,6 +52,8 @@ type ATN struct {
// specified, and otherwise is nil.
ruleToTokenType []int
+	// states is a list of all states in the ATN, ordered by state number.
+	//
states []ATNState
mu sync.Mutex
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
new file mode 100644
index 000000000..a83f25d34
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go
@@ -0,0 +1,335 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+const (
+ lexerConfig = iota // Indicates that this ATNConfig is for a lexer
+ parserConfig // Indicates that this ATNConfig is for a parser
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive in the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context *PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+ cType int // lexerConfig or parserConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
+func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ return NewATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
+func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ pac := &ATNConfig{}
+ pac.state = state
+ pac.alt = alt
+ pac.context = context
+ pac.semanticContext = semanticContext
+ pac.cType = parserConfig
+ return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config and a semantic context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+ return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context; the other 'constructors'
+// are just wrappers around this one.
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed
+ }
+ b := &ATNConfig{}
+ b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext)
+ b.cType = parserConfig
+ return b
+}
+
+func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) {
+
+ a.state = state
+ a.alt = alt
+ a.context = context
+ a.semanticContext = semanticContext
+ a.reachesIntoOuterContext = c.GetReachesIntoOuterContext()
+ a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed()
+}
+
+func (a *ATNConfig) getPrecedenceFilterSuppressed() bool {
+ return a.precedenceFilterSuppressed
+}
+
+func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ a.precedenceFilterSuppressed = v
+}
+
+// GetState returns the ATN state associated with this configuration
+func (a *ATNConfig) GetState() ATNState {
+ return a.state
+}
+
+// GetAlt returns the alternative associated with this configuration
+func (a *ATNConfig) GetAlt() int {
+ return a.alt
+}
+
+// SetContext sets the rule invocation stack associated with this configuration
+func (a *ATNConfig) SetContext(v *PredictionContext) {
+ a.context = v
+}
+
+// GetContext returns the rule invocation stack associated with this configuration
+func (a *ATNConfig) GetContext() *PredictionContext {
+ return a.context
+}
+
+// GetSemanticContext returns the semantic context associated with this configuration
+func (a *ATNConfig) GetSemanticContext() SemanticContext {
+ return a.semanticContext
+}
+
+// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration
+func (a *ATNConfig) GetReachesIntoOuterContext() int {
+ return a.reachesIntoOuterContext
+}
+
+// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration
+func (a *ATNConfig) SetReachesIntoOuterContext(v int) {
+ a.reachesIntoOuterContext = v
+}
+
+// Equals is the default comparison function for an ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool {
+ switch a.cType {
+ case lexerConfig:
+ return a.LEquals(o)
+ case parserConfig:
+ return a.PEquals(o)
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required
+// for a collection.
+//
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool {
+ var other, ok = o.(*ATNConfig)
+
+ if !ok {
+ return false
+ }
+ if a == other {
+ return true
+ } else if other == nil {
+ return false
+ }
+
+ var equal bool
+
+ if a.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = a.context.Equals(other.context)
+ }
+
+ var (
+ nums = a.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = a.alt == other.alt
+ cons = a.semanticContext.Equals(other.semanticContext)
+ sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+// Hash is the default hash function for an ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) Hash() int {
+ switch a.cType {
+ case lexerConfig:
+ return a.LHash()
+ case parserConfig:
+ return a.PHash()
+ default:
+ panic("Invalid ATNConfig type")
+ }
+}
+
+// PHash is the default hash function for a parser ATNConfig, when no specialist hash function
+// is required for a collection
+func (a *ATNConfig) PHash() int {
+ var c int
+ if a.context != nil {
+ c = a.context.Hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ return murmurFinish(h, 4)
+}
+
+// String returns a string representation of the ATNConfig, usually used for debugging purposes
+func (a *ATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if a.context != nil {
+ s1 = ",[" + fmt.Sprint(a.context) + "]"
+ }
+
+ if a.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(a.semanticContext)
+ }
+
+ if a.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3)
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.lexerActionExecutor = c.lexerActionExecutor
+ lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state)
+ lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext())
+ lac.cType = lexerConfig
+ return lac
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+ lac := &ATNConfig{}
+ lac.state = state
+ lac.alt = alt
+ lac.context = context
+ lac.semanticContext = SemanticContextNone
+ lac.cType = lexerConfig
+ return lac
+}
+
+// LHash is the default hash function for Lexer ATNConfig objects; it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LHash() int {
+ var f int
+ if a.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, a.state.GetStateNumber())
+ h = murmurUpdate(h, a.alt)
+ h = murmurUpdate(h, a.context.Hash())
+ h = murmurUpdate(h, a.semanticContext.Hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, a.lexerActionExecutor.Hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+// LEquals is the default comparison function for Lexer ATNConfig objects; it can be used directly or via
+// the default comparator [ObjEqComparator].
+func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool {
+ var otherT, ok = other.(*ATNConfig)
+ if !ok {
+ return false
+ } else if a == otherT {
+ return true
+ } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ switch {
+ case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil:
+ return true
+ case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil:
+ if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) {
+ return false
+ }
+ default:
+		return false // One, but not both, is nil
+ }
+
+ return a.PEquals(otherT)
+}
+
+func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
new file mode 100644
index 000000000..52dbaf806
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
@@ -0,0 +1,301 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// ATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type ATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two ATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]]
+
+ // configs is the added elements that did not match an existing key in configLookup
+ configs []*ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the ATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+	// readOnly is whether it is read-only. Do not
+	// allow any code to manipulate the set if true because DFA states will point at
+	// sets and those must not change. This does not, however, protect the other fields;
+	// conflictingAlts in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves re-computation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing, present := b.configLookup.Put(config)
+
+ // The config was not already in the set
+ //
+ if !present {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+// GetStates returns the set of states represented by all configurations in this config set
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+ // states uses the standard comparator and Hash() provided by the ATNState instance
+ //
+ states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Put(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+ predicates := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ predicates = append(predicates, c)
+ }
+ }
+
+ return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+	// Empty indicates that no optimization is possible
+ if b.configLookup == nil || b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+// Compare reports whether two config sets are equal. The configs are only equal if they are in the same order and their Equals function returns true.
+// Java uses ArrayList.equals(), which requires the same order.
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool {
+ if len(b.configs) != len(bs.configs) {
+ return false
+ }
+ for i := 0; i < len(b.configs); i++ {
+ if !b.configs[i].Equals(bs.configs[i]) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*ATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*ATNConfigSet)
+ var eca bool
+ switch {
+ case b.conflictingAlts == nil && other2.conflictingAlts == nil:
+ eca = true
+ case b.conflictingAlts != nil && other2.conflictingAlts != nil:
+ eca = b.conflictingAlts.equals(other2.conflictingAlts)
+ }
+ return b.configs != nil &&
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ eca &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext &&
+ b.Compare(other2)
+}
+
+func (b *ATNConfigSet) Hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *ATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.Hash()
+ }
+ return h
+}
+
+func (b *ATNConfigSet) Contains(item *ATNConfig) bool {
+ if b.readOnly {
+ panic("not implemented for read-only sets")
+ }
+ if b.configLookup == nil {
+ return false
+ }
+ return b.configLookup.Contains(item)
+}
+
+func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool {
+ return b.Contains(item)
+}
+
+func (b *ATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+ b.configs = make([]*ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()")
+}
+
+func (b *ATNConfigSet) String() string {
+
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair
+// for use in lexers.
+func NewOrderedATNConfigSet() *ATNConfigSet {
+ return &ATNConfigSet{
+ cachedHash: -1,
+ // This set uses the standard Hash() and Equals() from ATNConfig
+ configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"),
+ fullCtx: false,
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
similarity index 86%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
index 3c975ec7b..bdb30b362 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
@@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool {
func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
}
opts.readOnly = readOnly
}
@@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool {
func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
}
opts.verifyATN = verifyATN
}
@@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
if opts.readOnly {
- panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ panic(errors.New("cannot mutate read only ATNDeserializationOptions"))
}
opts.generateRuleBypassTransitions = generateRuleBypassTransitions
}
+//goland:noinspection GoUnusedExportedFunction
func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
similarity index 97%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
index 3888856b4..2dcb9ae11 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
@@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
return &ATNDeserializer{options: options}
}
+//goland:noinspection GoUnusedFunction
func stringInSlice(a string, list []string) int {
for i, b := range list {
if b == a {
@@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) {
}
}
-func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
+func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet {
m := a.readInt()
// Preallocate the needed capacity.
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
bypassStart.endState = bypassStop
- atn.defineDecisionState(bypassStart.BaseDecisionState)
+ atn.defineDecisionState(&bypassStart.BaseDecisionState)
bypassStop.startState = bypassStart
@@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
continue
}
- // We analyze the ATN to determine if a ATN decision state is the
+ // We analyze the [ATN] to determine if an ATN decision state is the
// decision for the closure block that determines whether a
// precedence rule should continue or complete.
if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
@@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int {
return int(v) // data is 32 bits but int is at least that big
}
-func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
target := atn.states[trg]
switch typeIndex {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
new file mode 100644
index 000000000..afe6c9f80
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()")
+ visited := NewVisitRecord()
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
new file mode 100644
index 000000000..2ae5807cd
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go
@@ -0,0 +1,461 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ Hash() int
+ Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
+ as.epsilonOnlyTransitions = false
+ }
+
+ // TODO: Check code for already present compared to the Java equivalent
+ //alreadyPresent := false
+ //for _, t := range as.transitions {
+ // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() {
+ // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) {
+ // alreadyPresent = true
+ // break
+ // }
+ // } else if t.getIsEpsilon() && trans.getIsEpsilon() {
+ // alreadyPresent = true
+ // break
+ // }
+ //}
+ //if !alreadyPresent {
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+ //} else {
+ // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber)
+ //}
+}
+
+type BasicState struct {
+ BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ return &BasicState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ }
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ }
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBasic,
+ },
+ decision: -1,
+ },
+ }
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ return &BasicBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ return &BlockEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateBlockEnd,
+ },
+ startState: nil,
+ }
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ return &RuleStopState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStop,
+ },
+ }
+}
+
+type RuleStartState struct {
+ BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ return &RuleStartState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateRuleStart,
+ },
+ }
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ return &PlusLoopbackState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusLoopBack,
+ },
+ },
+ }
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ return &PlusBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStatePlusBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ return &StarBlockStartState{
+ BaseBlockStartState: BaseBlockStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarBlockStart,
+ },
+ },
+ },
+ }
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ return &StarLoopbackState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopBack,
+ },
+ }
+}
+
+type StarLoopEntryState struct {
+ BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+	// precedenceRuleDecision is false by default and indicates whether this state can benefit from a precedence DFA during SLL decision-making.
+ return &StarLoopEntryState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateStarLoopEntry,
+ },
+ },
+ }
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ return &LoopEndState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateLoopEnd,
+ },
+ }
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ return &TokensStartState{
+ BaseDecisionState: BaseDecisionState{
+ BaseATNState: BaseATNState{
+ stateNumber: ATNStateInvalidStateNumber,
+ stateType: ATNStateTokenStart,
+ },
+ },
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go
rename to vendor/github.com/antlr4-go/antlr/v4/atn_type.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
similarity index 89%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/char_stream.go
index c33f0adb5..bd8127b6b 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go
@@ -8,5 +8,5 @@ type CharStream interface {
IntStream
GetText(int, int) string
GetTextFromTokens(start, end Token) string
- GetTextFromInterval(*Interval) string
+ GetTextFromInterval(Interval) string
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go
rename to vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
similarity index 88%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
index c6c9485a2..b75da9df0 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -28,22 +28,24 @@ type CommonTokenStream struct {
// trivial with bt field.
fetchedEOF bool
- // index indexs into tokens of the current token (next token to consume).
+ // index into [tokens] of the current token (next token to consume).
// tokens[p] should be LT(1). It is set to -1 when the stream is first
// constructed or when SetTokenSource is called, indicating that the first token
// has not yet been fetched from the token source. For additional information,
- // see the documentation of IntStream for a description of initializing methods.
+ // see the documentation of [IntStream] for a description of initializing methods.
index int
- // tokenSource is the TokenSource from which tokens for the bt stream are
+ // tokenSource is the [TokenSource] from which tokens for the bt stream are
// fetched.
tokenSource TokenSource
- // tokens is all tokens fetched from the token source. The list is considered a
+ // tokens contains all tokens fetched from the token source. The list is considered a
// complete view of the input once fetchedEOF is set to true.
tokens []Token
}
+// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
+// tokens; it will pull tokens only from the given lexer channel.
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
return &CommonTokenStream{
channel: channel,
@@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
}
}
+// GetAllTokens returns all tokens currently pulled from the token source.
func (c *CommonTokenStream) GetAllTokens() []Token {
return c.tokens
}
@@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int {
return 0
}
-func (c *CommonTokenStream) Release(marker int) {}
+func (c *CommonTokenStream) Release(_ int) {}
-func (c *CommonTokenStream) reset() {
+func (c *CommonTokenStream) Reset() {
+ c.fetchedEOF = false
+ c.tokens = make([]Token, 0)
c.Seek(0)
}
@@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() {
// Sync makes sure index i in tokens has a token and returns true if a token is
// located at index i and otherwise false.
func (c *CommonTokenStream) Sync(i int) bool {
- n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
+ n := i - len(c.tokens) + 1 // How many more elements do we need?
if n > 0 {
fetched := c.fetch(n)
@@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
c.tokenSource = tokenSource
c.tokens = make([]Token, 0)
c.index = -1
+ c.fetchedEOF = false
}
// NextTokenOnChannel returns the index of the next token on channel given a
// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
-// no tokens on channel between i and EOF.
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
+// no tokens on channel between 'i' and [TokenEOF].
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int {
c.Sync(i)
if i >= len(c.tokens) {
@@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To
nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
from := tokenIndex + 1
- // If no onchannel to the right, then nextOnChannel == -1, so set to to last token
+ // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token
var to int
if nextOnChannel == -1 {
@@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int {
}
func (c *CommonTokenStream) GetAllText() string {
- return c.GetTextFromInterval(nil)
+ c.Fill()
+ return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1))
}
func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
@@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string
return c.GetTextFromInterval(interval.GetSourceInterval())
}
-func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
+func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string {
c.lazyInit()
-
- if interval == nil {
- c.Fill()
- interval = NewInterval(0, len(c.tokens)-1)
- } else {
- c.Sync(interval.Stop)
- }
+ c.Sync(interval.Stop)
start := interval.Start
stop := interval.Stop
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
similarity index 82%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
rename to vendor/github.com/antlr4-go/antlr/v4/comparators.go
index 9ea320053..7467e9b43 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go
@@ -18,17 +18,20 @@ package antlr
// type safety and avoid having to implement this for every type that we want to perform comparison on.
//
// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which
-// allows us to use it in any collection instance that does nto require a special hash or equals implementation.
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
type ObjEqComparator[T Collectable[T]] struct{}
var (
- aStateEqInst = &ObjEqComparator[ATNState]{}
- aConfEqInst = &ObjEqComparator[ATNConfig]{}
- aConfCompInst = &ATNConfigComparator[ATNConfig]{}
- atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
+ aStateEqInst = &ObjEqComparator[ATNState]{}
+ aConfEqInst = &ObjEqComparator[*ATNConfig]{}
+
+ // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache
+ aConfCompInst = &ATNConfigComparator[*ATNConfig]{}
+ atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{}
dfaStateEqInst = &ObjEqComparator[*DFAState]{}
semctxEqInst = &ObjEqComparator[SemanticContext]{}
- atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
+ atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{}
+ pContextEqInst = &ObjEqComparator[*PredictionContext]{}
)
// Equals2 delegates to the Equals() method of type T
@@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int {
type SemCComparator[T Collectable[T]] struct{}
-// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type ATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int {
+func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int {
+
hash := 7
hash = 31*hash + o.GetState().GetStateNumber()
hash = 31*hash + o.GetAlt()
@@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
}
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup
-func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int {
+func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int {
h := murmurInit(7)
h = murmurUpdate(h, o.GetState().GetStateNumber())
h = murmurUpdate(h, o.GetContext().Hash())
return murmurFinish(h, 2)
}
-// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet
+// BaseATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
// and has a custom Equals() and Hash() implementation, because equality is not based on the
// standard Hash() and Equals() methods of the ATNConfig type.
type BaseATNConfigComparator[T Collectable[T]] struct {
}
// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet
-func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
+func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool {
// Same pointer, must be equal, even if both nil
//
@@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool {
// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just
// delegates to the standard Hash() method of the ATNConfig type.
-func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int {
-
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int {
return o.Hash()
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
new file mode 100644
index 000000000..c2b724514
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go
@@ -0,0 +1,214 @@
+package antlr
+
+type runtimeConfiguration struct {
+ statsTraceStacks bool
+ lexerATNSimulatorDebug bool
+ lexerATNSimulatorDFADebug bool
+ parserATNSimulatorDebug bool
+ parserATNSimulatorTraceATNSim bool
+ parserATNSimulatorDFADebug bool
+ parserATNSimulatorRetryDebug bool
+ lRLoopEntryBranchOpt bool
+ memoryManager bool
+}
+
+// Global runtime configuration
+var runtimeConfig = runtimeConfiguration{
+ lRLoopEntryBranchOpt: true,
+}
+
+type runtimeOption func(*runtimeConfiguration) error
+
+// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options.
+// It uses the functional options pattern for Go. This is a package-global function, as it operates on the runtime
+// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is
+// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the
+// only maintainer). However, it is possible that you might want to use this to set a global option concerning the
+// memory allocation type used by the runtime, such as whether to use sync.Pool or not.
+//
+// The options are applied in the order they are passed in, so the last option will override any previous options.
+//
+// For example, if you want to turn on the collection create point stack flag to true, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// If you want to turn it off, you can do:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func ConfigureRuntime(options ...runtimeOption) error {
+ for _, option := range options {
+ err := option(&runtimeConfig)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of
+// certain structs, such as collections, or the use point of certain methods such as Put().
+// Because this can be expensive, it is turned off by default. However, it
+// can be useful to track down exactly where memory is being created and used.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false))
+func WithStatsTraceStacks(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.statsTraceStacks = trace
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false))
+func WithLexerATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA]
+// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false))
+func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lexerATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false))
+func WithParserATNSimulatorDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator
+// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))
+func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorTraceATNSim = trace
+ return nil
+ }
+}
+
+// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful
+// to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false))
+func WithParserATNSimulatorDFADebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorDFADebug = debug
+ return nil
+ }
+}
+
+// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA]
+// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Only useful to the runtime maintainers.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.parserATNSimulatorRetryDebug = debug
+ return nil
+ }
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left-recursive loop operations should be
+// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Passing false turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
+func WithLRLoopEntryBranchOpt(off bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.lRLoopEntryBranchOpt = off
+ return nil
+ }
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// pools and reuses allocated objects to reduce garbage collection pressure. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+// antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+ return func(config *runtimeConfiguration) error {
+ config.memoryManager = use
+ return nil
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
similarity index 76%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa.go
index bfd43e1f7..6b63eb158 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -4,6 +4,8 @@
package antlr
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
type DFA struct {
// atnStartState is the ATN state in which this was created
atnStartState DecisionState
@@ -12,10 +14,9 @@ type DFA struct {
// states is all the DFA states. Use Map to get the old state back; Set can only
// indicate whether it is there. Go maps implement key hash collisions and so on and are very
- // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva
+ // good, but the DFAState is an object and can't be used directly as the key as it can in say Java
 // and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
- // to see if they really are the same object.
- //
+ // to see if they really are the same object. Hence, we have our own map storage.
//
states *JStore[*DFAState, *ObjEqComparator[*DFAState]]
@@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA {
dfa := &DFA{
atnStartState: atnStartState,
decision: decision,
- states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst),
+ states: nil, // Lazy initialize
}
if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
dfa.precedenceDfa = true
- dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
+ dfa.s0 = NewDFAState(-1, NewATNConfigSet(false))
dfa.s0.isAcceptState = false
dfa.s0.requiresFullContext = false
}
@@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool {
// true or nil otherwise, and d.precedenceDfa is updated.
func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
if d.getPrecedenceDfa() != precedenceDfa {
- d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst)
+ d.states = nil // Lazy initialize
d.numstates = 0
if precedenceDfa {
- precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
-
+ precedenceState := NewDFAState(-1, NewATNConfigSet(false))
precedenceState.setEdges(make([]*DFAState, 0))
precedenceState.isAcceptState = false
precedenceState.requiresFullContext = false
@@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
}
}
+// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy
+// instantiation of the states JStore.
+func (d *DFA) Len() int {
+ if d.states == nil {
+ return 0
+ }
+ return d.states.Len()
+}
+
+// Get returns a state that matches s if it is present in the DFA state set. We defer to this
+// function instead of accessing states directly so that we can implement lazy instantiation of the states JStore.
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ return nil, false
+ }
+ return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+ if d.states == nil {
+ d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+ }
+ return d.states.Put(s)
+}
+
func (d *DFA) getS0() *DFAState {
return d.s0
}
@@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) {
d.s0 = s
}
-// sortedStates returns the states in d sorted by their state number.
+// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
func (d *DFA) sortedStates() []*DFAState {
-
+ if d.states == nil {
+ return []*DFAState{}
+ }
vs := d.states.SortedSlice(func(i, j *DFAState) bool {
return i.stateNumber < j.stateNumber
})
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
similarity index 97%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
index 84d0a31e5..0e1100989 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
@@ -10,7 +10,7 @@ import (
"strings"
)
-// DFASerializer is a DFA walker that knows how to dump them to serialized
+// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
// strings.
type DFASerializer struct {
dfa *DFA
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
similarity index 81%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
rename to vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
index c90dec55c..654143074 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
@@ -22,30 +22,31 @@ func (p *PredPrediction) String() string {
return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
}
-// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
+// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
// states the ATN can be in after reading each input symbol. That is to say,
-// after reading input a1a2..an, the DFA is in a state that represents the
+// after reading input a1, a2, ..., an, the DFA is in a state that represents the
// subset T of the states of the ATN that are reachable from the ATN's start
-// state along some path labeled a1a2..an." In conventional NFA-to-DFA
-// conversion, therefore, the subset T would be a bitset representing the set of
-// states the ATN could be in. We need to track the alt predicted by each state
+// state along some path labeled a1a2..an."
+//
+// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
+// states the [ATN] could be in. We need to track the alt predicted by each state
// as well, however. More importantly, we need to maintain a stack of states,
// tracking the closure operations as they jump from rule to rule, emulating
// rule invocations (method calls). I have to add a stack to simulate the proper
// lookahead sequences for the underlying LL grammar from which the ATN was
// derived.
//
-// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
-// state (ala normal conversion) and a RuleContext describing the chain of rules
+// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
+// state (ala normal conversion) and a [RuleContext] describing the chain of rules
// (if any) followed to arrive at that state.
//
-// A DFAState may have multiple references to a particular state, but with
-// different ATN contexts (with same or different alts) meaning that state was
+// A [DFAState] may have multiple references to a particular state, but with
+// different [ATN] contexts (with same or different alts) meaning that state was
// reached via a different set of rule invocations.
type DFAState struct {
stateNumber int
- configs ATNConfigSet
+ configs *ATNConfigSet
// edges elements point to the target of the symbol. Shift up by 1 so (-1)
// Token.EOF maps to the first element.
@@ -53,7 +54,7 @@ type DFAState struct {
isAcceptState bool
- // prediction is the ttype we match or alt we predict if the state is accept.
+ // prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
// requiresFullContext.
prediction int
@@ -81,9 +82,9 @@ type DFAState struct {
predicates []*PredPrediction
}
-func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
+func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState {
if configs == nil {
- configs = NewBaseATNConfigSet(false)
+ configs = NewATNConfigSet(false)
}
return &DFAState{configs: configs, stateNumber: stateNumber}
@@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int {
var alts []int
if d.configs != nil {
- for _, c := range d.configs.GetItems() {
+ for _, c := range d.configs.configs {
alts = append(alts, c.GetAlt())
}
}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
rename to vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
index c55bcc19b..bd2cd8bc3 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
@@ -33,6 +33,7 @@ type DiagnosticErrorListener struct {
exactOnly bool
}
+//goland:noinspection GoUnusedExportedFunction
func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
n := new(DiagnosticErrorListener)
@@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
return n
}
-func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
if d.exactOnly && !exact {
return
}
@@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) {
msg := "reportAttemptingFullContext d=" +
d.getDecisionDescription(recognizer, dfa) +
@@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser,
recognizer.NotifyErrorListeners(msg, nil, nil)
}
-func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) {
msg := "reportContextSensitivity d=" +
d.getDecisionDescription(recognizer, dfa) +
", input='" +
@@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa
// @param configs The conflicting or ambiguous configuration set.
// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
// returns the set of alternatives represented in {@code configs}.
-func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet {
if ReportedAlts != nil {
return ReportedAlts
}
result := NewBitSet()
- for _, c := range set.GetItems() {
+ for _, c := range set.configs {
result.add(c.GetAlt())
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
new file mode 100644
index 000000000..21a021643
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Provides an empty default implementation of the ANTLR [ErrorListener] interface. The
+// default implementation of each method does nothing, but can be overridden as
+// necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE provides a default instance of [ConsoleErrorListener].
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to os.Stderr containing the
+// values of line, charPositionInLine, and msg using
+// the following format:
+//
+// line <line>:<charPositionInLine> <msg>
+func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+ _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
new file mode 100644
index 000000000..9db2be1c7
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
@@ -0,0 +1,702 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
+// error reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // See also [InErrorRecoveryMode].
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// ReportMatch is the default implementation of error matching and simply calls endErrorCondition.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// ReportError is the default implementation of error reporting.
+// It returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls [beginErrorCondition]
+// and dispatches the Reporting task based on the runtime type of e
+// according to the following table.
+//
+// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative]
+// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch]
+// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate]
+// All other types : Calls [NotifyErrorListeners] to Report the exception
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// Recover is the default recovery implementation.
+// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set -
+// loosely the set of tokens that can follow the current rule.
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at same token index and previously-Visited
+ // state in ATN must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.GetErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// Sync is the default implementation of error strategy synchronization.
+//
+// This Sync makes sure that the current lookahead symbol is consistent with what we were expecting
+// at this point in the [ATN]. You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
+//
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+// At the start of a sub-rule upon error, Sync performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub-rule is optional
+//
+// ((...)?, (...)*,
+//
+// or a block with an empty alternative), then the expected set includes what follows
+// the sub-rule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub-rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rule surrounding the loop. So, for rule:
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality cost a bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as empty:
+//
+// { }
+//
+// [Jim Idle]: https://github.com/jimidle
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // try cheaper subset first might get lucky. seems to shave a wee bit off
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
+
+// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+//
+// See also [ReportError]
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = ""
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = ""
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns,
+// recognizer is in error recovery mode.
+//
+// This method is called when singleTokenDeletion identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, recognizer is in error recovery mode.
+//
+// This method is called when singleTokenInsertion identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with an [InputMisMatchException].
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// # This recovery strategy is implemented by singleTokenDeletion
+//
+// # MISSING TOKEN (single token insertion)
+//
+// If the current token - at LA(1) - is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// called the chain:
+//
+// stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+// : ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work must panic the exception
+ recognizer.SetError(NewInputMisMatchException(recognizer))
+ return nil
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
+// true, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// This func returns true if single-token insertion is a viable recovery
+// strategy for the current mismatched input.
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if current token is consistent with what could come after current
+ // ATN state, then we know we're missing a token error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error recovery
+// strategy. It is called by [RecoverInline] to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// recognizer will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// [ReportUnwantedToken] to Report the error, followed by
+// [Consume] to actually “delete” the extraneous token. Then,
+// before returning, [ReportMatch] is called to signal a successful
+// Match.
+//
+// The func returns the successfully Matched [Token] instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise nil.
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// GetMissingSymbol conjures up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example:
+//
+// x=ID {f($x)}.
+//
+// The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want we assume that
+// this token is missing, and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a [CommonToken] of the appropriate type. The text will be the token name.
+// If you need to change which tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = "<missing EOF>"
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = "<missing " + ln[expectedTokenType] + ">"
+ } else {
+ tokenText = "<missing undefined>" // TODO: matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+// ;
+//
+// b : c '^' INT
+// ;
+//
+// c : ID
+// | INT
+// ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input “[]”, the call chain is
+//
+// a → b → c
+//
+// and, hence, the follow context stack is:
+//
+// Depth Follow set Start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets - the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c:
+//
+// {']','^'}
+//
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// [Algorithms + Data Structures = Programs] by Niklaus Wirth and
+// [A note on error recovery in recursive descent parsers].
+//
+// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers]
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead
+// during parsing. Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs
+//
+// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
+// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
+// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
+
+// consumeUntil consumes tokens until one Matches the given token set.
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+// by immediately canceling the parse operation with a
+// [ParseCancellationException]. The implementation ensures that the
+// [ParserRuleContext//exception] field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+// This error strategy is useful in the following scenarios.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of [BailErrorStrategy.Sync] improves the performance of
+// the first stage.
+//
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the [BailErrorStrategy] avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)]
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Recover Instead of recovering from exception e, re-panic it wrapped
+// in a [ParseCancellationException] so it is not caught by the
+// rule func catches. Use Exception.GetCause() to get the
+// original [RecognitionException].
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly
+}
+
+// RecoverInline makes sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Sync makes sure we don't attempt to recover from problems in sub-rules.
+func (b *BailErrorStrategy) Sync(_ Parser) {
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go
new file mode 100644
index 000000000..8f0f2f601
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go
@@ -0,0 +1,259 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+
+ // The current Token when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the {@link Token}
+ // instance itself.
+ //
+ t.offendingToken = nil
+
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the
+ // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match.
+ //
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, this method returns -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns nil.
+//
+// The func returns the set of token types that could potentially follow the current
+// state in the [ATN], or nil if the information is not available.
+
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs *ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs *ATNConfigSet
+}
+
+// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred.
+//
+// Reported by [ReportNoViableAlternative]
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)
+ n.deadEndConfigs = deadEndConfigs
+
+ // The token object at the start index; the input stream might
+ // not be buffering tokens, so get a reference to it.
+ //
+ // At the time the error occurred, of course the stream needs to keep a
+ // buffer of all the tokens, but later we might not have access to those.
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func (p ParseCancellationException) GetOffendingToken() Token {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetMessage() string {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (p ParseCancellationException) GetInputStream() IntStream {
+ //TODO implement me
+ panic("implement me")
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
new file mode 100644
index 000000000..5f65f809b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ InputStream
+ filename string
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func(f *os.File) {
+ // The file is opened read-only and fully read below, so the error
+ // from Close can safely be ignored.
+ _ = f.Close()
+ }(f)
+
+ reader := bufio.NewReader(f)
+ fInfo, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ fs := &FileStream{
+ InputStream: InputStream{
+ index: 0,
+ name: fileName,
+ },
+ filename: fileName,
+ }
+
+ // Pre-build the buffer and read runes efficiently
+ //
+ fs.data = make([]rune, 0, fInfo.Size())
+ for {
+ r, _, err := reader.ReadRune()
+ if err != nil {
+ break
+ }
+ fs.data = append(fs.data, r)
+ }
+ fs.size = len(fs.data) // Size in runes
+
+ // All done.
+ //
+ return fs, nil
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
new file mode 100644
index 000000000..b737fe85f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go
@@ -0,0 +1,157 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bufio"
+ "io"
+)
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+// NewIoStream creates a new input stream from the given io.Reader.
+// Note that the reader is read completely into memory, so it must actually
+// have a stopping point - you cannot pass in a reader on an open-ended source such
+// as a socket, for instance.
+func NewIoStream(reader io.Reader) *InputStream {
+
+ rReader := bufio.NewReader(reader)
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ }
+
+ // Pre-build the buffer and read runes reasonably efficiently given that
+ // we don't exactly know how big the input is.
+ //
+ is.data = make([]rune, 0, 512)
+ for {
+ r, _, err := rReader.ReadRune()
+ if err != nil {
+ break
+ }
+ is.data = append(is.data, r)
+ }
+ is.size = len(is.data) // number of runes
+ return is
+}
+
+// NewInputStream creates a new input stream from the given string
+func NewInputStream(data string) *InputStream {
+
+ is := &InputStream{
+ name: "",
+ index: 0,
+ data: []rune(data), // This is actually the most efficient way
+ }
+ is.size = len(is.data) // number of runes (note that len(data) would count bytes, not runes)
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+// Consume moves the input pointer to the next character in the input stream
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+// LA returns the character at the given lookahead offset relative to the current position in the input stream
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+// LT returns the character at the given lookahead offset relative to the current position in the input stream
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+// Index returns the current offset into the input stream
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+// Size returns the total number of characters in the input stream
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// Mark does nothing here as we have the entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+// Release does nothing here as we have the entire buffer
+func (is *InputStream) Release(_ int) {
+}
+
+// Seek sets the input cursor to the provided index offset
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+// GetText returns the text from the input stream from the start to the stop index
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
+// character of the stop token
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return ""
+}
+
+// String returns the entire input stream as a string
+func (is *InputStream) String() string {
+ return string(is.data)
+}
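// A short sketch of the lookahead semantics implemented above: LA is relative
// to the current position, and LA(-1) inspects the rune just consumed
// (assumes "fmt" is imported):
//
//	is := NewInputStream("ab")
//	fmt.Println(is.LA(1))  // 97 ('a'): one rune ahead of the current position
//	is.Consume()
//	fmt.Println(is.LA(-1)) // 97 ('a'): the rune just consumed
//	fmt.Println(is.LA(1))  // 98 ('b')
//	fmt.Println(is.LA(2))  // TokenEOF: past the end of the two-rune input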
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/int_stream.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
similarity index 82%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
rename to vendor/github.com/antlr4-go/antlr/v4/interval_set.go
index c1e155e81..cc5066067 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go
@@ -14,20 +14,21 @@ type Interval struct {
Stop int
}
-/* stop is not included! */
-func NewInterval(start, stop int) *Interval {
- i := new(Interval)
-
- i.Start = start
- i.Stop = stop
- return i
+// NewInterval creates a new interval with the given start and stop values; stop is exclusive (not part of the interval).
+func NewInterval(start, stop int) Interval {
+ return Interval{
+ Start: start,
+ Stop: stop,
+ }
}
-func (i *Interval) Contains(item int) bool {
+// Contains returns true if the given item is contained within the interval.
+func (i Interval) Contains(item int) bool {
return item >= i.Start && item < i.Stop
}
-func (i *Interval) String() string {
+// String generates a string representation of the interval.
+func (i Interval) String() string {
if i.Start == i.Stop-1 {
return strconv.Itoa(i.Start)
}
@@ -35,15 +36,18 @@ func (i *Interval) String() string {
return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
}
-func (i *Interval) length() int {
+// Length returns the length of the interval.
+func (i Interval) Length() int {
return i.Stop - i.Start
}
+// IntervalSet represents a collection of [Intervals], which may be read-only.
type IntervalSet struct {
- intervals []*Interval
+ intervals []Interval
readOnly bool
}
+// NewIntervalSet creates a new empty, writable, interval set.
func NewIntervalSet() *IntervalSet {
i := new(IntervalSet)
@@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet {
return i
}
+func (i *IntervalSet) Equals(other *IntervalSet) bool {
+ if len(i.intervals) != len(other.intervals) {
+ return false
+ }
+
+ for k, v := range i.intervals {
+ if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
func (i *IntervalSet) first() int {
if len(i.intervals) == 0 {
return TokenInvalidType
@@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) {
i.addInterval(NewInterval(l, h+1))
}
-func (i *IntervalSet) addInterval(v *Interval) {
+func (i *IntervalSet) addInterval(v Interval) {
if i.intervals == nil {
- i.intervals = make([]*Interval, 0)
+ i.intervals = make([]Interval, 0)
i.intervals = append(i.intervals, v)
} else {
// find insert pos
for k, interval := range i.intervals {
// distinct range -> insert
if v.Stop < interval.Start {
- i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...)
return
} else if v.Stop == interval.Start {
i.intervals[k].Start = v.Start
@@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool {
}
func (i *IntervalSet) length() int {
- len := 0
+ iLen := 0
for _, v := range i.intervals {
- len += v.length()
+ iLen += v.Length()
}
- return len
+ return iLen
}
-func (i *IntervalSet) removeRange(v *Interval) {
+func (i *IntervalSet) removeRange(v Interval) {
if v.Start == v.Stop-1 {
i.removeOne(v.Start)
} else if i.intervals != nil {
@@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) {
i.intervals[k] = NewInterval(ni.Start, v.Start)
x := NewInterval(v.Stop, ni.Stop)
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
return
} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
// i.intervals.splice(k, 1)
@@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) {
x := NewInterval(ki.Start, v)
ki.Start = v + 1
// i.intervals.splice(k, 0, x)
- i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...)
return
}
}
@@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin
return i.toIndexString()
}
-func (i *IntervalSet) GetIntervals() []*Interval {
+func (i *IntervalSet) GetIntervals() []Interval {
return i.intervals
}
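// The move from *Interval to value Interval above does not change the half-open
// semantics: Stop stays exclusive for Contains and Length, while String prints
// inclusive bounds. A quick sketch (assumes "fmt" is imported):
//
//	iv := NewInterval(3, 5)     // covers 3 and 4; Stop is exclusive
//	fmt.Println(iv.Contains(4)) // true
//	fmt.Println(iv.Contains(5)) // false
//	fmt.Println(iv.Length())    // 2
//	fmt.Println(iv.String())    // "3..4" (printed inclusively)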
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 000000000..ceccd96d2
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,685 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+ "container/list"
+ "runtime/debug"
+ "sort"
+ "sync"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface {
+ Hash() int
+ Equals(other Collectable[T]) bool
+}
+
+type Comparator[T any] interface {
+ Hash1(o T) int
+ Equals2(T, T) bool
+}
+
+type CollectionSource int
+type CollectionDescriptor struct {
+ SybolicName string
+ Description string
+}
+
+const (
+ UnknownCollection CollectionSource = iota
+ ATNConfigLookupCollection
+ ATNStateCollection
+ DFAStateCollection
+ ATNConfigCollection
+ PredictionContextCollection
+ SemanticContextCollection
+ ClosureBusyCollection
+ PredictionVisitedCollection
+ MergeCacheCollection
+ PredictionContextCacheCollection
+ AltSetCollection
+ ReachSetCollection
+)
+
+var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{
+ UnknownCollection: {
+ SybolicName: "UnknownCollection",
+ Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.",
+ },
+ ATNConfigCollection: {
+ SybolicName: "ATNConfigCollection",
+ Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "For instance, it is used to store the results of the closure() operation in the ATN.",
+ },
+ ATNConfigLookupCollection: {
+ SybolicName: "ATNConfigLookupCollection",
+ Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." +
+ "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.",
+ },
+ ATNStateCollection: {
+ SybolicName: "ATNStateCollection",
+ Description: "ATNState collection. This is used to store the states of the ATN.",
+ },
+ DFAStateCollection: {
+ SybolicName: "DFAStateCollection",
+ Description: "DFAState collection. This is used to store the states of the DFA.",
+ },
+ PredictionContextCollection: {
+ SybolicName: "PredictionContextCollection",
+		Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computations.",
+ },
+ SemanticContextCollection: {
+ SybolicName: "SemanticContextCollection",
+ Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.",
+ },
+ ClosureBusyCollection: {
+ SybolicName: "ClosureBusyCollection",
+ Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." +
+ "It stores ATNConfigs that are currently being processed in the closure() operation.",
+ },
+ PredictionVisitedCollection: {
+ SybolicName: "PredictionVisitedCollection",
+ Description: "A map that records whether we have visited a particular context when searching through cached entries.",
+ },
+ MergeCacheCollection: {
+ SybolicName: "MergeCacheCollection",
+ Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.",
+ },
+ PredictionContextCacheCollection: {
+ SybolicName: "PredictionContextCacheCollection",
+ Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.",
+ },
+ AltSetCollection: {
+ SybolicName: "AltSetCollection",
+ Description: "Used to eliminate duplicate alternatives in an ATN config set.",
+ },
+ ReachSetCollection: {
+ SybolicName: "ReachSetCollection",
+ Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.",
+ },
+}
+
+// JStore implements a container that allows the use of a struct to calculate the key
+// for a collection of values akin to a map. This is not meant to be a full-blown HashMap but just
+// serve the needs of the ANTLR Go runtime.
+//
+// For ease of porting the logic of the runtime from the master target (Java), this collection
+// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals()
+// function as the key. The values are stored in a standard go map which internally is a form of hashmap
+// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with
+// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't
+// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and
+// we understand the requirements, then this is fine - this is not a general purpose collection.
+type JStore[T any, C Comparator[T]] struct {
+ store map[int][]T
+ len int
+ comparator Comparator[T]
+ stats *JStatRec
+}
+
+func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] {
+
+ if comparator == nil {
+ panic("comparator cannot be nil")
+ }
+
+ s := &JStore[T, C]{
+ store: make(map[int][]T, 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ s.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ s.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(s.stats)
+ }
+ return s
+}
+
+// Put will store given value in the collection. Note that the key for storage is generated from
+// the value itself - this is specifically because that is what ANTLR needs - this would not be useful
+// as any kind of general collection.
+//
+// If the key has a hash conflict, then the value will be added to the slice of values associated with the
+// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is
+// tested by calling the equals() method on the key.
+//
+// If the given value is already present in the store, then the existing value is returned as v, and exists is set to true.
+//
+// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false.
+func (s *JStore[T, C]) Put(value T) (v T, exists bool) {
+
+ if collectStats {
+ s.stats.Puts++
+ }
+ kh := s.comparator.Hash1(value)
+
+ var hClash bool
+ for _, v1 := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(value, v1) {
+ if collectStats {
+ s.stats.PutHits++
+ s.stats.PutHashConflicts++
+ }
+ return v1, true
+ }
+ if collectStats {
+ s.stats.PutMisses++
+ }
+ }
+ if collectStats && hClash {
+ s.stats.PutHashConflicts++
+ }
+ s.store[kh] = append(s.store[kh], value)
+
+ if collectStats {
+ if len(s.store[kh]) > s.stats.MaxSlotSize {
+ s.stats.MaxSlotSize = len(s.store[kh])
+ }
+ }
+ s.len++
+ if collectStats {
+ s.stats.CurSize = s.len
+ if s.len > s.stats.MaxSize {
+ s.stats.MaxSize = s.len
+ }
+ }
+ return value, false
+}
+
+// Get will return the value associated with the key - the type of the key is the same type as the value
+// which would not generally be useful, but this is a specific thing for ANTLR where the key is
+// generated using the object we are going to store.
+func (s *JStore[T, C]) Get(key T) (T, bool) {
+ if collectStats {
+ s.stats.Gets++
+ }
+ kh := s.comparator.Hash1(key)
+ var hClash bool
+ for _, v := range s.store[kh] {
+ hClash = true
+ if s.comparator.Equals2(key, v) {
+ if collectStats {
+ s.stats.GetHits++
+ s.stats.GetHashConflicts++
+ }
+ return v, true
+ }
+ if collectStats {
+ s.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ s.stats.GetHashConflicts++
+ }
+ s.stats.GetNoEnt++
+ }
+ return key, false
+}
+
+// Contains returns true if the given key is present in the store
+func (s *JStore[T, C]) Contains(key T) bool {
+ _, present := s.Get(key)
+ return present
+}
+
+func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T {
+ vs := make([]T, 0, len(s.store))
+ for _, v := range s.store {
+ vs = append(vs, v...)
+ }
+ sort.Slice(vs, func(i, j int) bool {
+ return less(vs[i], vs[j])
+ })
+
+ return vs
+}
+
+func (s *JStore[T, C]) Each(f func(T) bool) {
+ for _, e := range s.store {
+ for _, v := range e {
+ f(v)
+ }
+ }
+}
+
+func (s *JStore[T, C]) Len() int {
+ return s.len
+}
+
+func (s *JStore[T, C]) Values() []T {
+ vs := make([]T, 0, len(s.store))
+ for _, e := range s.store {
+ vs = append(vs, e...)
+ }
+ return vs
+}
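// A hedged sketch of the JStore contract described above, using a hypothetical
// comparator for int values; any type satisfying Comparator[T] works, and hash
// collisions are absorbed by the per-bucket slice (assumes "fmt" is imported):
//
//	type intEq struct{}
//
//	func (intEq) Hash1(o int) int       { return o % 8 } // deliberately weak hash
//	func (intEq) Equals2(a, b int) bool { return a == b }
//
//	s := NewJStore[int, intEq](intEq{}, UnknownCollection, "example store")
//	v, exists := s.Put(42)          // 42, false: first insertion
//	v, exists = s.Put(42)           // 42, true: the existing value is returned
//	fmt.Println(v, exists, s.Len()) // 42 true 1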
+
+type entry[K, V any] struct {
+ key K
+ val V
+}
+
+type JMap[K, V any, C Comparator[K]] struct {
+ store map[int][]*entry[K, V]
+ len int
+ comparator Comparator[K]
+ stats *JStatRec
+}
+
+func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] {
+ m := &JMap[K, V, C]{
+ store: make(map[int][]*entry[K, V], 1),
+ comparator: comparator,
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) {
+ if collectStats {
+ m.stats.Puts++
+ }
+ kh := m.comparator.Hash1(key)
+
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.PutHits++
+ m.stats.PutHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.PutHashConflicts++
+ }
+ }
+ m.store[kh] = append(m.store[kh], &entry[K, V]{key, val})
+ if collectStats {
+ if len(m.store[kh]) > m.stats.MaxSlotSize {
+ m.stats.MaxSlotSize = len(m.store[kh])
+ }
+ }
+ m.len++
+ if collectStats {
+ m.stats.CurSize = m.len
+ if m.len > m.stats.MaxSize {
+ m.stats.MaxSize = m.len
+ }
+ }
+ return val, false
+}
+
+func (m *JMap[K, V, C]) Values() []V {
+ vs := make([]V, 0, len(m.store))
+ for _, e := range m.store {
+ for _, v := range e {
+ vs = append(vs, v.val)
+ }
+ }
+ return vs
+}
+
+func (m *JMap[K, V, C]) Get(key K) (V, bool) {
+ if collectStats {
+ m.stats.Gets++
+ }
+ var none V
+ kh := m.comparator.Hash1(key)
+ var hClash bool
+ for _, e := range m.store[kh] {
+ hClash = true
+ if m.comparator.Equals2(e.key, key) {
+ if collectStats {
+ m.stats.GetHits++
+ m.stats.GetHashConflicts++
+ }
+ return e.val, true
+ }
+ if collectStats {
+ m.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ m.stats.GetHashConflicts++
+ }
+ m.stats.GetNoEnt++
+ }
+ return none, false
+}
+
+func (m *JMap[K, V, C]) Len() int {
+ return m.len
+}
+
+func (m *JMap[K, V, C]) Delete(key K) {
+ kh := m.comparator.Hash1(key)
+ for i, e := range m.store[kh] {
+ if m.comparator.Equals2(e.key, key) {
+ m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...)
+ m.len--
+ return
+ }
+ }
+}
+
+func (m *JMap[K, V, C]) Clear() {
+ m.store = make(map[int][]*entry[K, V])
+}
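// JMap follows the same bucket-per-hash scheme with distinct key and value
// types. A sketch with a hypothetical string comparator:
//
//	type strEq struct{}
//
//	func (strEq) Hash1(s string) int       { return len(s) } // crude hash; Equals2 disambiguates
//	func (strEq) Equals2(a, b string) bool { return a == b }
//
//	m := NewJMap[string, int, strEq](strEq{}, UnknownCollection, "example map")
//	m.Put("alt", 1)
//	m.Put("tla", 2) // same length, same bucket; kept distinct by Equals2
//	if v, ok := m.Get("alt"); ok {
//		fmt.Println(v) // 1
//	}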
+
+type JPCMap struct {
+ store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]]
+ size int
+ stats *JStatRec
+}
+
+func NewJPCMap(cType CollectionSource, desc string) *JPCMap {
+ m := &JPCMap{
+ store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+ // Do we have a map stored by k1?
+ //
+ m2, present := pcm.store.Get(k1)
+ if present {
+ if collectStats {
+ pcm.stats.GetHits++
+ }
+ // We found a map of values corresponding to k1, so now we need to look up k2 in that map
+ //
+ return m2.Get(k2)
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) {
+
+ if collectStats {
+ pcm.stats.Puts++
+ }
+	// First, does a map already exist for k1?
+ //
+ if m2, present := pcm.store.Get(k1); present {
+ if collectStats {
+ pcm.stats.PutHits++
+ }
+ _, present = m2.Put(k2, v)
+ if !present {
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ }
+ } else {
+		// No map found for k1, so we create it, add in our value, then store it
+ //
+ if collectStats {
+ pcm.stats.PutMisses++
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry")
+ } else {
+ m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry")
+ }
+
+ m2.Put(k2, v)
+ pcm.store.Put(k1, m2)
+ pcm.size++
+ }
+}
+
+type JPCMap2 struct {
+ store map[int][]JPCEntry
+ size int
+ stats *JStatRec
+}
+
+type JPCEntry struct {
+ k1, k2, v *PredictionContext
+}
+
+func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 {
+ m := &JPCMap2{
+ store: make(map[int][]JPCEntry, 1000),
+ }
+ if collectStats {
+ m.stats = &JStatRec{
+ Source: cType,
+ Description: desc,
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ m.stats.CreateStack = debug.Stack()
+ }
+ Statistics.AddJStatRec(m.stats)
+ }
+ return m
+}
+
+func dHash(k1, k2 *PredictionContext) int {
+ return k1.cachedHash*31 + k2.cachedHash
+}
+
+func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Gets++
+ }
+
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.GetHits++
+ pcm.stats.GetHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.GetMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.GetHashConflicts++
+ }
+ pcm.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ pcm.stats.Puts++
+ }
+ h := dHash(k1, k2)
+ var hClash bool
+ for _, e := range pcm.store[h] {
+ hClash = true
+ if e.k1.Equals(k1) && e.k2.Equals(k2) {
+ if collectStats {
+ pcm.stats.PutHits++
+ pcm.stats.PutHashConflicts++
+ }
+ return e.v, true
+ }
+ if collectStats {
+ pcm.stats.PutMisses++
+ }
+ }
+ if collectStats {
+ if hClash {
+ pcm.stats.PutHashConflicts++
+ }
+ }
+ pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v})
+ pcm.size++
+ if collectStats {
+ pcm.stats.CurSize = pcm.size
+ if pcm.size > pcm.stats.MaxSize {
+ pcm.stats.MaxSize = pcm.size
+ }
+ }
+ return nil, false
+}
+
+type VisitEntry struct {
+ k *PredictionContext
+ v *PredictionContext
+}
+type VisitRecord struct {
+ store map[*PredictionContext]*PredictionContext
+ len int
+ stats *JStatRec
+}
+
+type VisitList struct {
+ cache *list.List
+ lock sync.RWMutex
+}
+
+var visitListPool = VisitList{
+ cache: list.New(),
+ lock: sync.RWMutex{},
+}
+
+// NewVisitRecord returns a new VisitRecord instance from the pool if available.
+// Note that this "map" uses a pointer as a key because we are emulating the behavior of
+// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal,
+// which means is the key the same reference to an object rather than is it .equals() to another
+// object.
+func NewVisitRecord() *VisitRecord {
+ visitListPool.lock.Lock()
+ el := visitListPool.cache.Front()
+ defer visitListPool.lock.Unlock()
+ var vr *VisitRecord
+ if el == nil {
+ vr = &VisitRecord{
+ store: make(map[*PredictionContext]*PredictionContext),
+ }
+ if collectStats {
+ vr.stats = &JStatRec{
+ Source: PredictionContextCacheCollection,
+ Description: "VisitRecord",
+ }
+ // Track where we created it from if we are being asked to do so
+ if runtimeConfig.statsTraceStacks {
+ vr.stats.CreateStack = debug.Stack()
+ }
+ }
+ } else {
+ vr = el.Value.(*VisitRecord)
+ visitListPool.cache.Remove(el)
+ vr.store = make(map[*PredictionContext]*PredictionContext)
+ }
+ if collectStats {
+ Statistics.AddJStatRec(vr.stats)
+ }
+ return vr
+}
+
+func (vr *VisitRecord) Release() {
+ vr.len = 0
+ vr.store = nil
+ if collectStats {
+ vr.stats.MaxSize = 0
+ vr.stats.CurSize = 0
+ vr.stats.Gets = 0
+ vr.stats.GetHits = 0
+ vr.stats.GetMisses = 0
+ vr.stats.GetHashConflicts = 0
+ vr.stats.GetNoEnt = 0
+ vr.stats.Puts = 0
+ vr.stats.PutHits = 0
+ vr.stats.PutMisses = 0
+ vr.stats.PutHashConflicts = 0
+ vr.stats.MaxSlotSize = 0
+ }
+ visitListPool.lock.Lock()
+ visitListPool.cache.PushBack(vr)
+ visitListPool.lock.Unlock()
+}
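// The intended life cycle for the pooled records above is acquire, use,
// release; ctx and merged below stand in for real *PredictionContext values:
//
//	vr := NewVisitRecord() // reuses a pooled record when one is available
//	defer vr.Release()     // resets stats and returns the record to visitListPool
//
//	if prev, ok := vr.Get(ctx); !ok { // identity (pointer) lookup, per the note above
//		vr.Put(ctx, merged)
//	} else {
//		_ = prev
//	}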
+
+func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Gets++
+ }
+ v := vr.store[k]
+ if v != nil {
+ if collectStats {
+ vr.stats.GetHits++
+ }
+ return v, true
+ }
+ if collectStats {
+ vr.stats.GetNoEnt++
+ }
+ return nil, false
+}
+
+func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) {
+ if collectStats {
+ vr.stats.Puts++
+ }
+ vr.store[k] = v
+ vr.len++
+ if collectStats {
+ vr.stats.CurSize = vr.len
+ if vr.len > vr.stats.MaxSize {
+ vr.stats.MaxSize = vr.len
+ }
+ }
+ return v, false
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
new file mode 100644
index 000000000..3c7896a91
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go
@@ -0,0 +1,426 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// Lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+	// The goal of all lexer rules/methods is to create a token object.
+	// The token is an instance variable because multiple rules may collaborate to
+	// create a single token. NextToken will return this object after
+	// Matching lexer rule(s). If you subclass to allow multiple token
+	// emissions, then set it to the last token to be Matched or
+	// something non-nil so that the auto token emit mechanism will not
+	// emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+	// The line on which the first character of the token resides
+	lexer.TokenStartLine = -1
+
+	// The character position of the first character within the line
+	lexer.TokenStartColumn = -1
+
+	// Once we see EOF on char stream, next token will be EOF.
+	// If you have DONE : EOF then you see DONE EOF.
+	lexer.hitEOF = false
+
+	// The channel number for the current token
+	lexer.channel = TokenDefaultChannel
+
+	// The token type for the current token
+	lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+	// You can set the text for the current token to override what is in
+	// the input char buffer. Use SetText() or set this instance variable directly.
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+//goland:noinspection GoUnusedConst
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) Reset() {
+	// reset lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// NextToken returns a token from the lexer input source, i.e., it Matches a token on the source char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+
+ ttype := b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+}
+
+// Skip instructs the lexer to Skip creating a token for current lexer rule
+// and look for another token. [NextToken] knows to keep looking when
+// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode
+// will be in force.
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the
+// current lexer mode to the supplied mode m.
+func (b *BaseLexer) PushMode(m int) {
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to
+// return to.
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
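// A small sketch of the mode-stack discipline; mode values are normally
// generated constants, plain ints here for illustration (assumes "fmt" is imported):
//
//	lex := NewBaseLexer(nil)   // no input needed just to exercise the mode stack
//	lex.PushMode(2)            // saves LexerDefaultMode (0), switches to mode 2
//	lex.PushMode(5)            // saves mode 2, switches to mode 5
//	fmt.Println(lex.PopMode()) // 2: restored from the stack
//	fmt.Println(lex.PopMode()) // 0: back to LexerDefaultMode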
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.Reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// EmitToken by default does not support multiple emits per [NextToken] invocation
+// for efficiency reasons. Subclass and override this func, [NextToken],
+// and [GetToken] (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// Emit is the standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom [Token] objects or provide a new factory.
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+// EmitEOF emits an EOF token. By default, this is the last token emitted
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned.
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// GetCharIndex returns the index of the current lookahead character
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// GetText returns the text Matched so far for the current token or any text override.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+// SetText sets the complete text of this token; it wipes any previous changes to the text.
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+// GetATN returns the ATN used by the lexer.
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// GetAllTokens returns a list of all [Token] objects in input char stream.
+// Forces a load of all tokens that can be made from the input char stream.
+//
+// Does not include EOF token.
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
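// As a usage sketch, GetAllTokens drains a lexer built over an in-memory
// stream; MyLexer stands in for a generated lexer embedding BaseLexer:
//
//	input := NewInputStream("a + b") // from input_stream.go above
//	lex := NewMyLexer(input)         // hypothetical generated constructor
//	for _, tok := range lex.GetAllTokens() {
//		fmt.Println(tok.GetText()) // token text; EOF excluded per the doc comment
//	}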
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return ""
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Recover can normally Match any char in its vocabulary after Matching
+// a token, so here we do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+//
+// In general, lexers should not need to recover and should have rules that cover any eventuality, such as
+// a character that makes no sense to the recognizer.
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
similarity index 78%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
index 111656c29..eaa7393e0 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
@@ -7,14 +7,29 @@ package antlr
import "strconv"
const (
- LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
- LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
- LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
- LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
- LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
- LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
- LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
- LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
+ // LexerActionTypeChannel represents a [LexerChannelAction] action.
+ LexerActionTypeChannel = 0
+
+ // LexerActionTypeCustom represents a [LexerCustomAction] action.
+ LexerActionTypeCustom = 1
+
+ // LexerActionTypeMode represents a [LexerModeAction] action.
+ LexerActionTypeMode = 2
+
+ // LexerActionTypeMore represents a [LexerMoreAction] action.
+ LexerActionTypeMore = 3
+
+ // LexerActionTypePopMode represents a [LexerPopModeAction] action.
+ LexerActionTypePopMode = 4
+
+ // LexerActionTypePushMode represents a [LexerPushModeAction] action.
+ LexerActionTypePushMode = 5
+
+ // LexerActionTypeSkip represents a [LexerSkipAction] action.
+ LexerActionTypeSkip = 6
+
+ // LexerActionTypeType represents a [LexerTypeAction] action.
+ LexerActionTypeType = 7
)
type LexerAction interface {
@@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction {
return la
}
-func (b *BaseLexerAction) execute(lexer Lexer) {
+func (b *BaseLexerAction) execute(_ Lexer) {
panic("Not implemented")
}
@@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool {
}
func (b *BaseLexerAction) Hash() int {
- return b.actionType
+ h := murmurInit(0)
+ h = murmurUpdate(h, b.actionType)
+ return murmurFinish(h, 1)
}
func (b *BaseLexerAction) Equals(other LexerAction) bool {
- return b == other
+ return b.actionType == other.getActionType()
}
-// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip].
//
-// The {@code Skip} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The Skip command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerSkipActionINSTANCE].
type LexerSkipAction struct {
*BaseLexerAction
}
@@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction {
return la
}
-// Provides a singleton instance of l parameterless lexer action.
+// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action.
var LexerSkipActionINSTANCE = NewLexerSkipAction()
func (l *LexerSkipAction) execute(lexer Lexer) {
lexer.Skip()
}
+// String returns a string representation of the current [LexerSkipAction].
func (l *LexerSkipAction) String() string {
return "skip"
}
+func (l *LexerSkipAction) Equals(other LexerAction) bool {
+ return other.getActionType() == LexerActionTypeSkip
+}
+
// Implements the {@code type} lexer action by calling {@link Lexer//setType}
//
// with the assigned type.
@@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string {
return "actionType(" + strconv.Itoa(l.thetype) + ")"
}
-// Implements the {@code pushMode} lexer action by calling
-// {@link Lexer//pushMode} with the assigned mode.
+// LexerPushModeAction implements the pushMode lexer action by calling
+// [Lexer.pushMode] with the assigned mode.
type LexerPushModeAction struct {
*BaseLexerAction
-
mode int
}
@@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string {
return "pushMode(" + strconv.Itoa(l.mode) + ")"
}
-// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode].
//
-// The {@code popMode} command does not have any parameters, so l action is
-// implemented as a singleton instance exposed by {@link //INSTANCE}.
+// The popMode command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE].
type LexerPopModeAction struct {
*BaseLexerAction
}
@@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string {
return "more"
}
-// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with
// the assigned mode.
type LexerModeAction struct {
*BaseLexerAction
-
mode int
}
@@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool {
}
}
-// Implements the {@code channel} lexer action by calling
-// {@link Lexer//setChannel} with the assigned channel.
-// Constructs a New{@code channel} action with the specified channel value.
-// @param channel The channel value to pass to {@link Lexer//setChannel}.
+// LexerChannelAction implements the channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
type LexerChannelAction struct {
*BaseLexerAction
-
channel int
}
+// NewLexerChannelAction creates a channel lexer action by calling
+// [Lexer.setChannel] with the assigned channel.
+//
+// Constructs a new channel action with the specified channel value.
func NewLexerChannelAction(channel int) *LexerChannelAction {
l := new(LexerChannelAction)
l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
@@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string {
// lexer actions, see {@link LexerActionExecutor//append} and
// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
-// Constructs a Newindexed custom action by associating a character offset
-// with a {@link LexerAction}.
-//
-// Note: This class is only required for lexer actions for which
-// {@link LexerAction//isPositionDependent} returns {@code true}.
-//
-// @param offset The offset into the input {@link CharStream}, relative to
-// the token start index, at which the specified lexer action should be
-// executed.
-// @param action The lexer action to execute at a particular offset in the
-// input {@link CharStream}.
type LexerIndexedCustomAction struct {
*BaseLexerAction
-
offset int
lexerAction LexerAction
isPositionDependent bool
}
+// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset
+// with a [LexerAction].
+//
+// Note: This class is only required for lexer actions for which
+// [LexerAction.isPositionDependent] returns true.
+//
+// The offset points into the input [CharStream], relative to
+// the token start index, at which the specified lexerAction should be
+// executed.
func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
l := new(LexerIndexedCustomAction)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
new file mode 100644
index 000000000..dfc28c32b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "golang.org/x/exp/slices"
+
+// Represents an executor for a sequence of lexer actions which were traversed during
+// the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the [DFA] created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+	// Caches the result of Hash() since the hash code is an element
+	// of the performance-critical [ATNConfig] hash operation.
+ l.cachedHash = murmurInit(0)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.Hash())
+ }
+ l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions))
+
+ return l
+}
+
+// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for
+// the input [LexerActionExecutor] followed by a specified
+// [LexerAction].
+// TODO: This does not match the Java code
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
+
+// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// [LexerAction.isPositionDependent] returns true, it calls
+// [IntStream.Seek] on the input [CharStream] to set the input
+// position to the end of the current token. This behavior provides
+// for efficient [DFA] representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the [ATN], the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the [DFA] representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same Length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns this instance.
+//
+// The offset is assigned to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// The func returns a [LexerActionExecutor] that stores input stream offsets
+// for all position-dependent lexer actions.
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions))
+ updatedLexerActions = append(updatedLexerActions, l.lexerActions...)
+ }
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
+
+// execute runs the actions encapsulated by this executor within the context of a
+// particular [Lexer].
+//
+// This method calls [IntStream.Seek] to set the position of the input
+// [CharStream] prior to calling [LexerAction.execute] on a position-dependent
+// action. Before the method returns, the input position will be restored to
+// the same position it was in when the method was invoked.
+//
+// lexer is the lexer instance. input is the input stream which is the source
+// for the current token; when this method is called, the current [IntStream.Index]
+// for input should be the start of the following token, i.e. one character past
+// the end of the current token. startIndex is the token start index; this value
+// may be passed to [IntStream.Seek] to set the input position to the beginning
+// of the token.
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) Hash() int {
+ if l == nil {
+ // TODO: Why is this here? l should not be nil
+ return 61
+ }
+
+ // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) Equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ if l.cachedHash != othert.cachedHash {
+ return false
+ }
+ if len(l.lexerActions) != len(othert.lexerActions) {
+ return false
+ }
+ return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool {
+ return i.Equals(j)
+ })
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
similarity index 80%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
rename to vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
index c573b7521..fe938b025 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
@@ -10,10 +10,8 @@ import (
"strings"
)
+//goland:noinspection GoUnusedGlobalVariable
var (
- LexerATNSimulatorDebug = false
- LexerATNSimulatorDFADebug = false
-
LexerATNSimulatorMinDFAEdge = 0
LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
@@ -32,11 +30,11 @@ type ILexerATNSimulator interface {
}
type LexerATNSimulator struct {
- *BaseATNSimulator
+ BaseATNSimulator
recog Lexer
predictionMode int
- mergeCache DoubleDict
+ mergeCache *JPCMap2
startIndex int
Line int
CharPositionInLine int
@@ -46,27 +44,35 @@ type LexerATNSimulator struct {
}
func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
- l := new(LexerATNSimulator)
-
- l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+ l := &LexerATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
l.decisionToDFA = decisionToDFA
l.recog = recog
+
// The current token's starting index into the character stream.
// Shared across DFA to ATN simulation in case the ATN fails and the
// DFA did not have a previous accept state. In l case, we use the
// ATN-generated exception object.
l.startIndex = -1
- // line number 1..n within the input///
+
+ // line number 1..n within the input
l.Line = 1
+
// The index of the character relative to the beginning of the line
- // 0..n-1///
+ // 0..n-1
l.CharPositionInLine = 0
+
l.mode = LexerDefaultMode
+
// Used during DFA/ATN exec to record the most recent accept configuration
// info
l.prevAccept = NewSimState()
- // done
+
return l
}
@@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() {
func (l *LexerATNSimulator) MatchATN(input CharStream) int {
startState := l.atn.modeToStartState[l.mode]
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
}
oldMode := l.mode
@@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
predict := l.execATN(input, next)
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
}
return predict
@@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int {
func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("start state closure=" + ds0.configs.String())
}
if ds0.isAcceptState {
- // allow zero-length tokens
+ // allow zero-Length tokens
l.captureSimState(l.prevAccept, input, ds0)
}
t := input.LA(1)
s := ds0 // s is current/from DFA state
for { // while more work
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("execATN loop starting closure: " + s.configs.String())
}
@@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
}
}
t = input.LA(1)
- s = target // flip current DFA target becomes Newsrc/from state
+ s = target // flip current DFA target becomes new src/from state
}
return l.failOrAccept(l.prevAccept, input, s.configs, t)
@@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState
return nil
}
target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
- if LexerATNSimulatorDebug && target != nil {
+ if runtimeConfig.lexerATNSimulatorDebug && target != nil {
fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
}
return target
}
-// Compute a target state for an edge in the DFA, and attempt to add the
-// computed state and corresponding edge to the DFA.
+// computeTargetState computes a target state for an edge in the [DFA], and attempts to add the
+// computed state and corresponding edge to the [DFA].
//
-// @param input The input stream
-// @param s The current DFA state
-// @param t The next input symbol
-//
-// @return The computed target DFA state for the given input symbol
-// {@code t}. If {@code t} does not lead to a valid DFA state, l method
-// returns {@link //ERROR}.
+// The func returns the computed target [DFA] state for the given input symbol t.
+// If this does not lead to a valid [DFA] state, this method
+// returns ATNSimulatorError.
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
reach := NewOrderedATNConfigSet()
// if we don't find an existing DFA state
// Fill reach starting from closure, following t transitions
- l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
+ l.getReachableConfigSet(input, s.configs, reach, t)
if len(reach.configs) == 0 { // we got nowhere on t from s
if !reach.hasSemanticContext {
// we got nowhere on t, don't panic out l knowledge it'd
- // cause a failover from DFA later.
+ // cause a fail-over from DFA later.
l.addDFAEdge(s, t, ATNSimulatorError, nil)
}
// stop when we can't Match any more char
return ATNSimulatorError
}
// Add an edge from s to target DFA found/created for reach
- return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
+ return l.addDFAEdge(s, t, nil, reach)
}
-func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int {
if l.prevAccept.dfaState != nil {
lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
@@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream,
panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
}
-// Given a starting configuration set, figure out all ATN configurations
-// we can reach upon input {@code t}. Parameter {@code reach} is a return
-// parameter.
-func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
+// getReachableConfigSet, when given a starting configuration set, figures out all [ATN] configurations
+// we can reach upon input t.
+//
+// Parameter reach is a return parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) {
// l is used to Skip processing for configs which have a lower priority
- // than a config that already reached an accept state for the same rule
+	// than a config that already reached an accept state for the same rule
SkipAlt := ATNInvalidAltNumber
- for _, cfg := range closure.GetItems() {
- currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
- if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
+ for _, cfg := range closure.configs {
+ currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt
+ if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision {
continue
}
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
- fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String())
}
for _, trans := range cfg.GetState().GetTransitions() {
target := l.getReachableTarget(trans, t)
if target != nil {
- lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
+ lexerActionExecutor := cfg.lexerActionExecutor
if lexerActionExecutor != nil {
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
}
- treatEOFAsEpsilon := (t == TokenEOF)
- config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
+ treatEOFAsEpsilon := t == TokenEOF
+ config := NewLexerATNConfig3(cfg, target, lexerActionExecutor)
if l.closure(input, config, reach,
currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
				// any remaining configs for this alt have a lower priority
@@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC
}
func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Printf("ACTION %v\n", lexerActionExecutor)
}
// seek to after last char in token
@@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState
return nil
}
-func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet {
configs := NewOrderedATNConfigSet()
for i := 0; i < len(p.GetTransitions()); i++ {
target := p.GetTransitions()[i].getTarget()
@@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord
return configs
}
-// Since the alternatives within any lexer decision are ordered by
-// preference, l method stops pursuing the closure as soon as an accept
+// closure: since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
// state is reached. After the first accept state is reached by depth-first
-// search from {@code config}, all other (potentially reachable) states for
-// l rule would have a lower priority.
+// search from config, all other (potentially reachable) states for
+// this rule would have a lower priority.
//
-// @return {@code true} if an accept state is reached, otherwise
-// {@code false}.
-func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
+// The func returns true if an accept state is reached.
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet,
currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
- if LexerATNSimulatorDebug {
- fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
+ if runtimeConfig.lexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
}
_, ok := config.state.(*RuleStopState)
if ok {
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
if l.recog != nil {
fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
} else {
@@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co
}
// side-effect: can alter configs.hasSemanticContext
-func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
- configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition,
+ configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig {
- var cfg *LexerATNConfig
+ var cfg *ATNConfig
if trans.getSerializationType() == TransitionRULE {
@@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
pt := trans.(*PredicateTransition)
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
}
- configs.SetHasSemanticContext(true)
+ configs.hasSemanticContext = true
if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
cfg = NewLexerATNConfig4(config, trans.getTarget())
}
@@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
// TODO: if the entry rule is invoked recursively, some
// actions may be executed during the recursive call. The
// problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In l case, the config needs to be
+ // isEmpty() is false. In this case, the config needs to be
// split into two contexts - one with just the empty path
// and another with everything but the empty path.
// Unfortunately, the current algorithm does not allow
@@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC
return cfg
}
-// Evaluate a predicate specified in the lexer.
+// evaluatePredicate evaluates a predicate specified in the lexer.
//
-// If {@code speculative} is {@code true}, l method was called before
-// {@link //consume} for the Matched character. This method should call
-// {@link //consume} before evaluating the predicate to ensure position
-// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
-// and {@link Lexer//getcolumn}, properly reflect the current
-// lexer state. This method should restore {@code input} and the simulator
-// to the original state before returning (i.e. undo the actions made by the
-// call to {@link //consume}.
+// If speculative is true, this method was called before
+// [Consume] for the Matched character. This method should call
+// [Consume] before evaluating the predicate to ensure position
+// sensitive values, including [GetText], [GetLine],
+// and [GetColumn], properly reflect the current
+// lexer state. This method should restore input and the simulator
+// to the original state before returning, i.e. undo the actions made by the
+// call to [Consume].
//
-// @param input The input stream.
-// @param ruleIndex The rule containing the predicate.
-// @param predIndex The index of the predicate within the rule.
-// @param speculative {@code true} if the current index in {@code input} is
-// one character before the predicate's location.
-//
-// @return {@code true} if the specified predicate evaluates to
-// {@code true}.
-// /
+// The func returns true if the specified predicate evaluates to true.
func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
// assume true if no recognizer was provided
if l.recog == nil {
@@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream
settings.dfaState = dfaState
}
-func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState {
if to == nil && cfgs != nil {
		// leading to this call, ATNConfigSet.hasSemanticContext is used as a
		// marker indicating dynamic predicate evaluation makes this edge
@@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// TJP notes: next time through the DFA, we see a pred again and eval.
// If that gets us to a previously created (but dangling) DFA
// state, we can continue in pure DFA mode from there.
- // /
- suppressEdge := cfgs.HasSemanticContext()
- cfgs.SetHasSemanticContext(false)
-
+ //
+ suppressEdge := cfgs.hasSemanticContext
+ cfgs.hasSemanticContext = false
to = l.addDFAState(cfgs, true)
if suppressEdge {
@@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// Only track edges within the DFA bounds
return to
}
- if LexerATNSimulatorDebug {
+ if runtimeConfig.lexerATNSimulatorDebug {
fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
}
l.atn.edgeMu.Lock()
@@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg
// configurations already. This method also detects the first
// configuration containing an ATN rule stop state. Later, when
// traversing the DFA, we will know which rule to accept.
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
+func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState {
proposed := NewDFAState(-1, configs)
- var firstConfigWithRuleStopState ATNConfig
-
- for _, cfg := range configs.GetItems() {
+ var firstConfigWithRuleStopState *ATNConfig
+ for _, cfg := range configs.configs {
_, ok := cfg.GetState().(*RuleStopState)
if ok {
@@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
}
if firstConfigWithRuleStopState != nil {
proposed.isAcceptState = true
- proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
}
dfa := l.decisionToDFA[l.mode]
l.atn.stateMu.Lock()
defer l.atn.stateMu.Unlock()
- existing, present := dfa.states.Get(proposed)
+ existing, present := dfa.Get(proposed)
if present {
// This state was already present, so just return it.
@@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool)
// We need to add the new state
//
- proposed.stateNumber = dfa.states.Len()
- configs.SetReadOnly(true)
+ proposed.stateNumber = dfa.Len()
+ configs.readOnly = true
+ configs.configLookup = nil // Not needed now
proposed.configs = configs
- dfa.states.Put(proposed)
+ dfa.Put(proposed)
}
if !suppressEdge {
dfa.setS0(proposed)
@@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA {
return l.decisionToDFA[mode]
}
-// Get the text Matched so far for the current token.
+// GetText returns the text [Match]ed so far for the current token.
func (l *LexerATNSimulator) GetText(input CharStream) string {
// index is first lookahead char, don't include.
return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
new file mode 100644
index 000000000..4955ac876
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
@@ -0,0 +1,218 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+const (
+	// LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit
+	// a predicate during analysis if seeThruPreds==false.
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+// getDecisionLookahead calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an [ATNState]. The returned array has one element for each
+// outgoing transition in s. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be nil.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+
+ look[alt] = NewIntervalSet()
+ lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy")
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false)
+
+	// Wipe out lookahead for this alternative if we found nothing,
+ // or we had a predicate when we !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+// Look computes the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+//
+// If ctx is nil and the end of the rule containing
+// s is reached, [TokenEpsilon] is added to the result set.
+//
+// If ctx is not nil and the end of the outermost rule is
+// reached, [TokenEOF] is added to the result set.
+//
+// Parameter s is the ATN state, and stopState is the ATN state to stop at. This can be a
+// [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the complete parser context, or nil if the context
+// should be ignored
+//
+// The func returns the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ var lookContext *PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"),
+ NewBitSet(), true, true)
+ return r
+}
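+
+// A minimal usage sketch (s and ctx are illustrative values taken from a
+// deserialized ATN and a parse in progress, not defined here):
+//
+//	la := NewLL1Analyzer(atn)
+//	follow := la.Look(s, nil, ctx) // tokens that can follow s within ctx
+//	fmt.Println(follow.String())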
+
+// look1 computes the set of tokens that can follow s in the [ATN] in the
+// specified ctx.
+//
+// If ctx is nil and stopState or the end of the
+// rule containing s is reached, [TokenEpsilon] is added to
+// the result set. If ctx is not nil and addEOF is
+// true and stopState or the end of the outermost rule is
+// reached, [TokenEOF] is added to the result set.
+//
+// Parameter s is the ATN state, and stopState is the ATN state to stop at, which can be a
+// [BlockEndState] to detect epsilon paths through a closure.
+//
+// Parameter ctx is the outer context, or nil if the outer context should
+// not be used.
+//
+// Parameter look is the result lookahead set.
+//
+// Parameter lookBusy is a set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass a fresh store for
+// this argument.
+//
+// Parameter calledRuleStack is a set used for preventing left recursion in the
+// ATN from causing a stack overflow. Outside code should pass NewBitSet() for
+// this argument.
+//
+// Parameter seeThruPreds is true to treat semantic predicates as
+// implicitly true and "see through them", otherwise false
+// to treat semantic predicates as opaque and add [LL1AnalyzerHitPred] to the
+// result if one is encountered.
+//
+// Parameter addEOF controls whether [TokenEOF] is added to the result when the end of the
+// outermost context is reached. This parameter has no effect if ctx
+// is nil.
+
+func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ _, present := lookBusy.Put(c)
+ if present {
+		return
+	}
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx.pcType != PredictionContextEmpty {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]],
+ calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
new file mode 100644
index 000000000..923c7b52c
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
@@ -0,0 +1,47 @@
+//go:build !antlr.stats
+
+package antlr
+
+// This file is compiled when the build configuration antlr.stats is not enabled,
+// which then allows the compiler to optimize out all the code that is not used.
+const collectStats = false
+
+// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled.
+type goRunStats struct {
+}
+
+var Statistics = &goRunStats{}
+
+func (s *goRunStats) AddJStatRec(_ *JStatRec) {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) CollectionAnomalies() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Reset() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+func (s *goRunStats) Report(dir string, prefix string) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func (s *goRunStats) Analyze() {
+ // Do nothing - compiler will optimize this out (hopefully)
+}
+
+type statsOption func(*goRunStats) error
+
+func (s *goRunStats) Configure(options ...statsOption) error {
+ // Do nothing - compiler will optimize this out (hopefully)
+ return nil
+}
+
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ return nil
+ }
+}
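+
+// As a sketch of how the build-tag pairing works (the statistics file shipped
+// in this package is more elaborate than this), a file guarded by the opposite
+// constraint supplies the working implementation:
+//
+//	//go:build antlr.stats
+//
+//	package antlr
+//
+//	const collectStats = true // turns on the hot-path recording code
+//
+// Because collectStats is a constant, branches such as
+// "if collectStats { ... }" are dead code when the antlr.stats tag is absent,
+// and the compiler can (hopefully) remove them entirely.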
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go
new file mode 100644
index 000000000..fb57ac15d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go
@@ -0,0 +1,700 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// NewBaseParser creates a BaseParser, which contains all the parsing support code to embed in
+// parsers. Essentially most of it is error recovery stuff.
+//
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+
+	// The error handling strategy for the parser. The default value is a new
+	// instance of [DefaultErrorStrategy].
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+
+	// The ParserRuleContext object for the currently executing rule.
+	// It is always non-nil during the parsing process.
+ p.ctx = nil
+
+	// Specifies whether the parser should construct a parse tree during
+	// the parsing process. The default value is true.
+ p.BuildParseTrees = true
+
+ // When setTrace(true) is called, a reference to the
+ // TraceListener is stored here, so it can be easily removed in a
+ // later call to setTrace(false). The listener itself is
+	// implemented as a parser listener, so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+
+ // The list of ParseTreeListener listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+
+	// The number of syntax errors Reported during parsing. This value is
+ // incremented each time NotifyErrorListeners is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// bypassAltsAtnCache maps from the serialized ATN string to the deserialized [ATN] with
+// bypass alternatives. See [ATNDeserializationOptions.isGenerateRuleBypassTransitions].
+//
+//goland:noinspection GoUnusedGlobalVariable
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state.
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match matches the current input symbol against ttype. If the symbol type
+// Matches, ReportMatch and [Consume] are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// RecoverInline is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// RecoverInline is -1, the symbol is added to
+// the parse tree by calling AddErrorNode.
+//
+// The func returns the Matched symbol, and panics with a [RecognitionException] if
+// the current input symbol did not Match ttype and the error strategy
+// could not recover from the mismatched symbol.
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.HasError() {
+ return nil
+ }
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
+
+// MatchWildcard matches the current input symbol as a wildcard. If the symbol type Matches
+// (i.e. has a value greater than 0), ReportMatch
+// and [Consume] are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// RecoverInline is called on the current error
+// strategy to attempt recovery. If BuildParseTrees is
+// true and the token index of the symbol returned by
+// RecoverInline is -1, the symbol is added to
+// the parse tree by calling AddErrorNode.
+//
+// The func returns the Matched symbol, and panics with a [RecognitionException] if
+// the current input symbol did not Match a wildcard and the error strategy
+// could not recover from the mismatched symbol.
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// AddParseListener registers listener to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parse is complete. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
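+
+// A minimal registration sketch; ruleEnterCounter is an illustrative type,
+// not part of this package:
+//
+//	type ruleEnterCounter struct {
+//		*BaseParseTreeListener
+//		enters int
+//	}
+//
+//	func (r *ruleEnterCounter) EnterEveryRule(ctx ParserRuleContext) {
+//		r.enters++ // fires during the parse, on every rule entry
+//	}
+//
+//	parser.AddParseListener(&ruleEnterCounter{BaseParseTreeListener: &BaseParseTreeListener{}})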
+
+// RemoveParseListener removes listener from the list of parse listeners.
+//
+// If listener is nil or has not been added as a parse
+// listener, this func does nothing.
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// removeParseListeners removes all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+// TriggerExitRuleEvent notifies any parse listeners of an exit rule event.
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens.
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it
+// lazily.
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO - Implement this?
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// compileParseTreePattern is the preferred method of getting a tree pattern. For example,
+// here's a sample use:
+//
+//	ParseTree t = parser.expr()
+//	ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+//		MyParser.RULE_expr)
+//	ParseTreeMatch m = p.Match(t)
+//	String id = m.Get("ID")
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) compileParseTreePattern(pattern string, patternRuleIndex int, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// SetTokenStream installs input as the token stream and resets the parser.
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// GetCurrentToken returns the current token at LT(1).
+//
+// [Match] needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+ // if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// GetPrecedence returns the precedence level for the topmost precedence rule, or -1 if
+// the parser context is not nested within a precedence rule.
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+// PushNewRecursionContext is like [EnterRule] but for recursive rules.
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ _, _ = p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
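+
+// Generated left-recursive rule code guards each recursion level with this
+// check; a hedged sketch of a call site (exprParser, the rule, and the case
+// numbers are illustrative, not generated here):
+//
+//	func (p *exprParser) Sempred(localctx RuleContext, ruleIndex, predIndex int) bool {
+//		switch predIndex {
+//		case 0:
+//			return p.Precpred(p.GetParserRuleContext(), 2) // recurse only while precedence allows
+//		}
+//		return true
+//	}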
+
+//goland:noinspection GoUnusedParameter
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+// IsExpectedToken checks whether symbol can follow the current state in the
+// [ATN]. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+//	return getExpectedTokens().contains(symbol)
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// GetExpectedTokens returns the set of input symbols which could follow the current parser
+// state and context, as given by [GetState] and [GetContext],
+// respectively.
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
+
+// GetRuleIndex gets a rule's index (i.e., RULE_ruleName field) or -1 if not found.
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// GetRuleInvocationStack returns a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
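+
+// A small diagnostic sketch (illustrative; typically used from an error
+// listener or a panic handler, with strings imported):
+//
+//	stack := parser.GetRuleInvocationStack(nil) // nil starts from the current rule
+//	fmt.Println("rule stack:", strings.Join(stack, " <- "))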
+
+// GetDFAStrings returns a string rendering of all DFA states, for debugging purposes.
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// DumpDFA prints the whole of the DFA for debugging
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.Len() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// SetTrace installs a trace listener for the parse.
+//
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
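+
+// A quick usage sketch: any non-nil argument switches tracing on (a fresh
+// TraceListener is installed internally; the argument value itself is only a
+// toggle), and nil switches it off. Expr here is a hypothetical generated
+// rule method:
+//
+//	parser.SetTrace(&TraceListener{}) // begin printing enter/exit/consume events
+//	tree := parser.Expr()
+//	parser.SetTrace(nil) // stop tracing
+//	_ = tree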
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
new file mode 100644
index 000000000..ae2869692
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
@@ -0,0 +1,1668 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over
+// a standard JStore so that we can use lazy instantiation of the JStore, mostly
+// to avoid polluting the stats module with a ton of JStore instances with nothing in them.
+type ClosureBusy struct {
+ bMap *JStore[*ATNConfig, Comparator[*ATNConfig]]
+ desc string
+}
+
+// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules
+func NewClosureBusy(desc string) *ClosureBusy {
+ return &ClosureBusy{
+ desc: desc,
+ }
+}
+
+func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) {
+ if c.bMap == nil {
+ c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc)
+ }
+ return c.bMap.Put(config)
+}
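+
+// A minimal usage sketch (cfg stands in for an *ATNConfig produced by the
+// surrounding closure walk): the underlying store is only allocated on the
+// first Put, so decisions that never recurse pay nothing for the set:
+//
+//	busy := NewClosureBusy("example closure-busy set")
+//	if _, present := busy.Put(cfg); present {
+//		return // this configuration was already explored
+//	}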
+
+type ParserATNSimulator struct {
+ BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *JPCMap
+ outerContext ParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := &ParserATNSimulator{
+ BaseATNSimulator: BaseATNSimulator{
+ atn: atn,
+ sharedContextCache: sharedContextCache,
+ },
+ }
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambig detection?//
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. [JPCMap]
+ // isn't Synchronized, but we're ok since two threads shouldn't reuse same
+ // parser/atn-simulator object because it can only handle one input at a time.
+ // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid
+ // the merge if we ever see a and b again. Note that (b,a) -> c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
+
+func (p *ParserATNSimulator) reset() {
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // whack cache after each prediction
+ // Do not attempt to run a GC now that we're done with the cache as makes the
+ // GC overhead terrible for badly formed grammars and has little effect on well formed
+ // grammars.
+ // I have made some extra effort to try and reduce memory pressure by reusing allocations when
+ // possible. However, it can only have a limited effect. The real solution is to encourage grammar
+ // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect
+ // what is happening at runtime, along with using the error listener to report ambiguities.
+
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+		// If this is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt, re := p.execATN(dfa, s0, input, index, outerContext)
+ parser.SetError(re)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
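+
+// Generated code calls AdaptivePredict directly; applications usually only
+// tune the simulator around it. A hedged sketch of the common two-stage
+// setup (the identifiers exist in this package, the flow is illustrative):
+//
+//	interp := parser.GetInterpreter()
+//	interp.SetPredictionMode(PredictionModeSLL) // try the fast SLL path first
+//	// ... on a parse failure, rewind the input and retry with:
+//	interp.SetPredictionMode(PredictionModeLL)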
+
+// execATN performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+//
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+//
+// - If the set is empty, there is no viable alternative for current symbol
+// - Does the state uniquely predict an alternative?
+// - Does the state have a conflict that would prevent us from
+// putting it on the work list?
+//
+// We also have some key operations to do:
+//
+//   - Add an edge from the previous DFA state to the potentially new DFA state, D,
+//     upon the current symbol, but only if adding to the work list, which means in all
+//     cases except no viable alternative (and possibly non-greedy decisions?)
+// - Collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - Consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+//
+// Cover these cases:
+//
+// - dead end
+// - single alt
+// - single alt + predicates
+// - conflict
+// - conflict + predicates
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ ", DFA state " + s0.String() +
+ ", LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ p.parser.SetError(e)
+ return ATNInvalidAltNumber, e
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.conflictingAlts
+ if D.predicates != nil {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL fail-over")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue(), nil
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt, re
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction, nil
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex)
+ case 1:
+ return alts.minValue(), nil
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue(), nil
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+}
+
+// getExistingTargetState gets an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns nil.
+//
+// The func returns the existing target DFA state for the given input symbol
+// t, or nil if the target state for this edge is not
+// already cached.
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
+
+// computeTargetState computes a target state for an edge in the DFA, and attempts to add the
+// computed state and corresponding edge to the DFA.
+//
+// The func returns the computed target DFA state for the given input symbol
+// t. If t does not lead to a valid DFA state, this method
+// returns [ATNSimulatorError].
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+ // create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.uniqueAlt = predictedAlt
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.conflictingAlts = p.getConflictingAlts(reach)
+ D.requiresFullContext = true
+	// in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.conflictingAlts.minValue())
+ }
+ if D.isAcceptState && D.configs.hasSemanticContext {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) {
+
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach *ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt, nil
+ }
+ return alt, p.noViableAlt(input, outerContext, previous, startIndex)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.uniqueAlt = p.getUniqueAlt(reach)
+ // unique prediction?
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ predictedAlt = reach.uniqueAlt
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+			// In exact ambiguity mode, we never try to terminate early.
+			// Just keep scarfing input until we know what the conflict is.
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.uniqueAlt != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt, nil
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have non-conflicting alt subsets as in:
+ //
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+ //
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+	// In this case, (17,1,[5 $]) indicates there is some next sequence that
+	// would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt, nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet {
+ if p.mergeCache == nil {
+ p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()")
+ }
+ intermediate := NewATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+	// For full-context reach operations, separate handling is required to
+	// ensure that the alternative matching the longest overall sequence is
+	// chosen when multiple such configurations can match the input.
+
+ var skippedStopStates []*ATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.configs {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach *ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+	// contains all configurations relevant to the reach set, but this
+	// condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+			// Don't pursue the closure if there is just one state.
+			// It can only have one alternative; just add it to the result.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewATNConfigSet(fullCtx)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy")
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+		// When reach==intermediate, no closure operation was performed. In
+		// this case, removeAllConfigsNotInRuleStopState needs to check for
+		// reachable rule stop states as well as configurations already in
+		// a rule stop state.
+		//
+		// This is handled before the configurations in SkippedStopStates,
+		// because any configurations potentially added from that list are
+		// already guaranteed to meet this condition whether or not it is
+		// required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate))
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+	// closure operation reached such a state. This ensures AdaptivePredict
+	// chooses an alternative matching the longest overall sequence when
+	// multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String())
+ }
+
+ if len(reach.configs) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from
+// configs which are in a [RuleStopState]. If all
+// configurations in configs are already in a rule stop state, this
+// method simply returns configs.
+//
+// When lookToEndOfRule is true, this method uses
+// [ATN].[NextTokens] for each configuration in configs which is
+// not already in a rule stop state, to see if a rule stop state is
+// reachable from that configuration via epsilon-only transitions.
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewATNConfigSet(configs.fullCtx)
+ for _, config := range configs.configs {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
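+
+// For example (configs are hypothetical): given {(stopA, alt 1), (s5, alt 2)}
+// where s5 reaches stopB via epsilon-only transitions, lookToEndOfRule=true
+// yields {(stopA, 1), (stopB, 2)}, while lookToEndOfRule=false yields only
+// {(stopA, 1)}.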
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewATNConfigSet(fullCtx)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("computeStartState from ATN state " + a.String() +
+ " initialContext=" + initialContext.String())
+ }
+
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewATNConfig6(target, i+1, initialContext)
+ closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy")
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+// applyPrecedenceFilter transforms the start state computed by
+// [computeStartState] to the special start state used by a
+// precedence [DFA] for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+// 1. Evaluate the precedence predicates for each configuration using
+// [SemanticContext].evalPrecedence.
+// 2. Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context.
+//
+// Transformation 2 is valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the [ATN] state immediately before the token
+// reference 'a' in letterA is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// statement. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to prog, and then back in to statement
+// from being eliminated by the filter.
+//
+// The func returns the transformed configuration set representing the start state
+// for a precedence [DFA] at a particular precedence level (determined by
+// calling [Parser].getPrecedence).
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]*PredictionContext)
+ configSet := NewATNConfigSet(configs.fullCtx)
+
+ for _, config := range configs.configs {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.configs {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+		// In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.Equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
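+
+// A worked sketch of the filter above, with made-up state/context values:
+// given configs {(s, 1, ctxX), (s, 2, ctxX), (t, 2, ctxY)}, the alt-1 pass
+// records statesFromAlt1[s] = ctxX; the (s, 2, ctxX) twin is then eliminated
+// because its state and context match an alt-1 configuration (unless its
+// precedence-filter suppression flag is set), while (t, 2, ctxY) survives
+// because no alt-1 configuration shares its state and context.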
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.configs {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // unambiguous alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // un-predicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a [NoViableAltException] in particular prediction scenarios where the
+// Error state was reached during [ATN] simulation.
+//
+// The default implementation of this method uses the following
+// algorithm to identify an [ATN] configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// [ParserRuleContext] returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if one or more semantically invalid but syntactically valid paths
+// exist, return the minimum associated alt.
+// - Otherwise, return [ATNInvalidAltNumber].
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a [FailedPredicateException] in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// [AdaptivePredict] instead of panicking with a [NoViableAltException], the resulting
+// [FailedPredicateException] in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+// Pass in configs holding the [ATN] configurations which were valid immediately before
+// the ERROR state was reached, and outerContext as the initial parser context from the paper,
+// i.e. the parser stack at the instant before prediction commences.
+//
+// The func returns the value to return from [AdaptivePredict], or
+// [ATNInvalidAltNumber] if a suitable alternative was not
+// identified and [AdaptivePredict] should report an error instead.
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.configs) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
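+
+// Selection order, sketched with hypothetical split results: if the
+// semantically valid set finishes the entry rule with alts {2, 3}, the method
+// returns 2 (the minimum); only when that set yields nothing does it fall
+// back to the syntactically valid but semantically invalid set.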
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.configs {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in the succeeded set. Returns a pair of sets.
+//
+// Create a new set so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point of
+// prediction, which is where predicates need to evaluate.
+
+type ATNConfigSetPair struct {
+ item0, item1 *ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet {
+ succeeded := NewATNConfigSet(configs.fullCtx)
+ failed := NewATNConfigSet(configs.fullCtx)
+
+ for _, c := range configs.configs {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []*ATNConfigSet{succeeded, failed}
+}
+
+// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an
+// un-predicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
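+
+// For example (pairs are hypothetical): with predPredictions
+// [({p1}?, 1), (NONE, 2), ({p3}?, 3)] and complete=false, evaluation stops at
+// the first winner (alt 1 if {p1}? holds, otherwise alt 2, since NONE always
+// wins); with complete=true every pair is evaluated and all winning alts are
+// collected into the returned BitSet.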
+
+func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ var stack []*ATNConfig
+ visited := make(map[*ATNConfig]bool)
+
+ stack = append(stack, config)
+
+ for len(stack) > 0 {
+ currConfig := stack[len(stack)-1]
+ stack = stack[:len(stack)-1]
+
+ if _, ok := visited[currConfig]; ok {
+ continue
+ }
+ visited[currConfig] = true
+
+ if _, ok := currConfig.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !currConfig.GetContext().isEmpty() {
+ for i := 0; i < currConfig.GetContext().length(); i++ {
+ if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[currConfig.GetContext().getReturnState(i)]
+ newContext := currConfig.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext())
+ // While we have context to pop back from, we may have
+					// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext())
+
+ stack = append(stack, c)
+ }
+ continue
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(currConfig, p.mergeCache)
+ continue
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex()))
+ }
+ }
+ }
+
+ p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("closure(" + config.String() + ")")
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY)
+ configs.Add(nb, p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+				// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if c != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+				// target fell off end of rule; mark resulting c as having dipped into outer context
+				// We can't get here if incoming config was rule stop and we had context
+				// track how far we dip into outer context. Might
+				// come in handy and we avoid evaluating context-dependent
+				// preds if this is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+ configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+
+ if !t.getIsEpsilon() {
+ _, present := closureBusy.Put(c)
+ if present {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool {
+ if !runtimeConfig.lRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+	// the context has an empty stack case. If so, it would mean
+	// global FOLLOW, so we can't perform the optimization.
+ if _p.GetStateType() != ATNStateStarLoopEntry {
+ return false
+ }
+ startLoop, ok := _p.(*StarLoopEntryState)
+ if !ok {
+ return false
+ }
+ if !startLoop.precedenceRuleDecision ||
+ config.GetContext().isEmpty() ||
+ config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+	// that _p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+ x := _p.GetTransitions()[0].getTarget()
+ decisionStartState := x.(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+		// which points to _p, but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+	sb.WriteString("<rule ")
+	sb.WriteString(strconv.FormatInt(int64(index), 10))
+	sb.WriteString(">")
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *ATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewATNConfig4(config, pt.getTarget())
+ }
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ;, it has a [DFA]
+// state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]].
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via
+//
+// [6|2|[]].
+//
+// The key is that we have a single state that has config's only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+// [1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.uniqueAlt != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.uniqueAlt)
+ } else {
+ conflictingAlts = configs.conflictingAlts
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+ return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in [AdaptivePredict] around [execATN], but I cut
+// it out for clarity now that alg. works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+	// for i:=0; i < len(decs); i++ {
+	//    c := decs[i]
+	//    trans := "no edges"
+	//    if len(c.state.GetTransitions()) > 0 {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.configs {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+// addDFAEdge adds an edge to the [DFA], if possible. This method calls
+// addDFAState to ensure the to state is present in the
+// DFA. If from is nil, or if t is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If to is nil, this method returns nil.
+// Otherwise, it returns the [DFAState] returned by calling
+// addDFAState for the to state.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if runtimeConfig.parserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+ to = p.addDFAState(dfa, to) // used existing if possible not incoming
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if runtimeConfig.parserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+
+ existing, present := dfa.Get(d)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Print("addDFAState " + d.String() + " exists")
+ }
+ return existing
+ }
+
+ // The state will be added if not already there or we will be given back the existing state struct
+ // if it is present.
+ //
+ d.stateNumber = dfa.Len()
+ if !d.configs.readOnly {
+ d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+ d.configs.readOnly = true
+ d.configs.configLookup = nil
+ }
+ dfa.Put(d)
+
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("addDFAState new " + d.String())
+ }
+
+ return d
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// ReportAmbiguity reports an ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// When parsing context-sensitively, we know it's an ambiguity, not a conflict or error, and we can report it to the developer
+// so that they can see that this is happening and can take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+ if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
similarity index 77%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
rename to vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
index 1c8cee747..c249bc138 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -31,7 +31,9 @@ type ParserRuleContext interface {
}
type BaseParserRuleContext struct {
- *BaseRuleContext
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
start, stop Token
exception RecognitionException
@@ -40,8 +42,22 @@ type BaseParserRuleContext struct {
func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
prc := new(BaseParserRuleContext)
+ InitBaseParserRuleContext(prc, parent, invokingStateNumber)
+ return prc
+}
+
+func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
+	// What context invoked this rule?
+ prc.parentCtx = parent
- prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
+	// What state invoked the rule associated with this context?
+	// The "return address" is the followState of invokingState.
+	// If parent is nil, this should be -1.
+ if parent == nil {
+ prc.invokingState = -1
+ } else {
+ prc.invokingState = invokingStateNumber
+ }
prc.RuleIndex = -1
// * If we are debugging or building a parse tree for a Visitor,
@@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int)
// The exception that forced prc rule to return. If the rule successfully
// completed, prc is {@code nil}.
prc.exception = nil
-
- return prc
}
func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
@@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string {
return s
}
-// Double dispatch methods for listeners
-func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
+// EnterRule is called when any rule is entered.
+func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
}
-func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
+// ExitRule is called when any rule is exited.
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
}
-// * Does not set parent link other add methods do that///
+// * Does not set parent link other add methods do that
func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
if prc.children == nil {
prc.children = make([]Tree, 0)
@@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
return child
}
-// * Used by EnterOuterAlt to toss out a RuleContext previously added as
-// we entered a rule. If we have // label, we will need to remove
-// generic ruleContext object.
-// /
+// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
func (prc *BaseParserRuleContext) RemoveLastChild() {
if prc.children != nil && len(prc.children) > 0 {
prc.children = prc.children[0 : len(prc.children)-1]
@@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int {
return len(prc.children)
}
-func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
+func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
if prc.start == nil || prc.stop == nil {
return TreeInvalidInterval
}
@@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s
return s
}
+func (prc *BaseParserRuleContext) SetParent(v Tree) {
+ if v == nil {
+ prc.parentCtx = nil
+ } else {
+ prc.parentCtx = v.(RuleContext)
+ }
+}
+
+func (prc *BaseParserRuleContext) GetInvokingState() int {
+ return prc.invokingState
+}
+
+func (prc *BaseParserRuleContext) SetInvokingState(t int) {
+ prc.invokingState = t
+}
+
+func (prc *BaseParserRuleContext) GetRuleIndex() int {
+ return prc.RuleIndex
+}
+
+func (prc *BaseParserRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
+
+// IsEmpty returns true if the context of prc is empty.
+//
+// A context is empty if there is no invoking state, meaning nobody called
+// the current context.
+func (prc *BaseParserRuleContext) IsEmpty() bool {
+ return prc.invokingState == -1
+}
+
+// GetParent returns the parent context of this context, or nil if this
+// context has no parent.
+func (prc *BaseParserRuleContext) GetParent() Tree {
+ return prc.parentCtx
+}
+
var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
type InterpreterRuleContext interface {
@@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct {
*BaseParserRuleContext
}
+//goland:noinspection GoUnusedExportedFunction
func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
prc := new(BaseInterpreterRuleContext)
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 000000000..c1b80cc1f
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "golang.org/x/exp/slices"
+ "strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+const (
+ // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
+ // doesn't mean wildcard:
+ //
+ // $ + x = [$,x]
+ //
+ // Here,
+ //
+ // $ = EmptyReturnState
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+const (
+ PredictionContextEmpty = iota
+ PredictionContextSingleton
+ PredictionContextArray
+)
+
+// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to provide its own implementation.
+type PredictionContext struct {
+ cachedHash int
+ pcType int
+ parentCtx *PredictionContext
+ returnState int
+ parents []*PredictionContext
+ returnStates []int
+}
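+
+// The single struct stands in for what Java models as a class hierarchy; a
+// sketch of which fields each pcType uses (values illustrative):
+//
+//	PredictionContextEmpty:     returnState = BasePredictionContextEmptyReturnState ($)
+//	PredictionContextSingleton: parentCtx + returnState (one stack frame)
+//	PredictionContextArray:     parents[i] paired with returnStates[i] (merged stacks)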
+
+func NewEmptyPredictionContext() *PredictionContext {
+ nep := &PredictionContext{}
+ nep.cachedHash = calculateEmptyHash()
+ nep.pcType = PredictionContextEmpty
+ nep.returnState = BasePredictionContextEmptyReturnState
+ return nep
+}
+
+func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext {
+ pc := &PredictionContext{}
+ pc.pcType = PredictionContextSingleton
+ pc.returnState = returnState
+ pc.parentCtx = parent
+ if parent != nil {
+ pc.cachedHash = calculateHash(parent, returnState)
+ } else {
+ pc.cachedHash = calculateEmptyHash()
+ }
+ return pc
+}
+
+func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext {
+	// Parent can be nil only in full ctx mode, when we make an array
+	// from EMPTY and non-empty contexts. We merge EMPTY by using a
+	// nil parent and returnState == BasePredictionContextEmptyReturnState.
+ hash := murmurInit(1)
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.Hash())
+ }
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ nec := &PredictionContext{}
+ nec.cachedHash = hash
+ nec.pcType = PredictionContextArray
+ nec.parents = parents
+ nec.returnStates = returnStates
+ return nec
+}
+
+func (p *PredictionContext) Hash() int {
+ return p.cachedHash
+}
+
+func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool {
+ switch p.pcType {
+	case PredictionContextEmpty:
+		// Guard the type assertion: asserting on a nil interface would panic.
+		if other == nil {
+			return true
+		}
+		otherP, ok := other.(*PredictionContext)
+		return ok && (otherP == nil || otherP.isEmpty())
+ case PredictionContextSingleton:
+ return p.SingletonEquals(other)
+ case PredictionContextArray:
+ return p.ArrayEquals(other)
+ }
+ return false
+}
+
+func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool {
+ if o == nil {
+ return false
+ }
+ other := o.(*PredictionContext)
+ if other == nil || other.pcType != PredictionContextArray {
+ return false
+ }
+ if p.cachedHash != other.Hash() {
+ return false // can't be same if hash is different
+ }
+
+ // Must compare the actual array elements and not just the array address
+ //
+ return slices.Equal(p.returnStates, other.returnStates) &&
+ slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool {
+ return x.Equals(y)
+ })
+}
+
+func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool {
+ if other == nil {
+ return false
+ }
+ otherP := other.(*PredictionContext)
+ if otherP == nil {
+ return false
+ }
+
+ if p.cachedHash != otherP.Hash() {
+ return false // Can't be same if hash is different
+ }
+
+ if p.returnState != otherP.getReturnState(0) {
+ return false
+ }
+
+ // Both parents must be nil if one is
+ if p.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return p.parentCtx.Equals(otherP.parentCtx)
+}
+
+func (p *PredictionContext) GetParent(i int) *PredictionContext {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return nil
+ case PredictionContextSingleton:
+ return p.parentCtx
+ case PredictionContextArray:
+ return p.parents[i]
+ }
+ return nil
+}
+
+func (p *PredictionContext) getReturnState(i int) int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates[i]
+ default:
+ return p.returnState
+ }
+}
+
+func (p *PredictionContext) GetReturnStates() []int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return p.returnStates
+ default:
+ return []int{p.returnState}
+ }
+}
+
+func (p *PredictionContext) length() int {
+ switch p.pcType {
+ case PredictionContextArray:
+ return len(p.returnStates)
+ default:
+ return 1
+ }
+}
+
+func (p *PredictionContext) hasEmptyPath() bool {
+ switch p.pcType {
+ case PredictionContextSingleton:
+ return p.returnState == BasePredictionContextEmptyReturnState
+ }
+ return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (p *PredictionContext) String() string {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return "$"
+ case PredictionContextSingleton:
+ var up string
+
+ if p.parentCtx == nil {
+ up = ""
+ } else {
+ up = p.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if p.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(p.returnState)
+ }
+
+ return strconv.Itoa(p.returnState) + " " + up
+ case PredictionContextArray:
+ if p.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(p.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if p.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(p.returnStates[i])
+ if !p.parents[i].isEmpty() {
+ s = s + " " + p.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+ return s + "]"
+
+ default:
+ return "unknown"
+ }
+}
+
+func (p *PredictionContext) isEmpty() bool {
+ switch p.pcType {
+ case PredictionContextEmpty:
+ return true
+ case PredictionContextArray:
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return p.returnStates[0] == BasePredictionContextEmptyReturnState
+ default:
+ return false
+ }
+}
+
+func (p *PredictionContext) Type() int {
+ return p.pcType
+}
+
+func calculateHash(parent *PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.Hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+// predictionContextFromRuleContext converts a [RuleContext] tree to a [PredictionContext] graph.
+// It returns [BasePredictionContextEMPTY] if outerContext is empty or nil.
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext {
+ if outerContext == nil {
+ outerContext = ParserRuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
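+
+// For example (a sketch of the recursion above, assuming rules start -> expr -> term):
+// the resulting graph is a singleton chain with term's follow state on top,
+// expr's follow state beneath it, and EMPTY at the root for the start rule.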
+
+func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+
+ // Share same graph if both same
+ //
+ if a == b || a.Equals(b) {
+ return a
+ }
+
+ if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton {
+ return mergeSingletons(a, b, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as wildcard
+ if rootIsWildcard {
+ if a.isEmpty() {
+ return a
+ }
+ if b.isEmpty() {
+ return b
+ }
+ }
+
+ // Convert either Singleton or Empty to arrays, so that we can merge them
+ //
+ ara := convertToArray(a)
+ arb := convertToArray(b)
+ return mergeArrays(ara, arb, rootIsWildcard, mergeCache)
+}
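+
+// A small behavioral sketch (assuming only the helpers in this file): merging
+// two singletons that share a parent but differ in return state collapses to
+// a single array node over that shared parent.
+//
+//	p := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 5)
+//	a := SingletonBasePredictionContextCreate(p, 7)
+//	b := SingletonBasePredictionContextCreate(p, 9)
+//	m := merge(a, b, true, nil) // array context [7, 9] over the shared parent p
+//	_ = m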
+
+func convertToArray(pc *PredictionContext) *PredictionContext {
+ switch pc.Type() {
+ case PredictionContextEmpty:
+ return NewArrayPredictionContext([]*PredictionContext{}, []int{})
+ case PredictionContextSingleton:
+ return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)})
+ default:
+ // Already an array
+ }
+ return pc
+}
+
+// mergeSingletons merges two singleton [PredictionContext] instances a and b,
+// where rootIsWildcard is true for a local-context merge and false for a
+// full-context merge. There are four cases:
+//
+//   - Stack tops equal and parents merge to the same graph: return the left graph.
+//   - Same stack top but parents differ: merge the parents, giving an array node
+//     for the remainders of those graphs, with a new root node created to point
+//     to the merged parents.
+//   - Different stack tops pointing to the same parent: make an array node for
+//     the root where both elements point to the same (original) parent.
+//   - Different stack tops pointing to different parents: make an array node for
+//     the root where each element points to the corresponding original parent.
+func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ return previous
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent.Equals(a.parentCtx) {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent.Equals(b.parentCtx) {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array.
+ // New joined parent so create a new singleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent *PredictionContext
+	if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + bx = [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []*PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+ }
+	// Parents differ and we can't merge them, so just pack them
+	// together into an array: ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []*PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []*PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.Put(a, b, apc)
+ }
+ return apc
+}
+
+// mergeRoot handles the case where at least one of a or b is EMPTY. In the
+// following descriptions, the symbol $ is used to represent EMPTY, and
+// rootIsWildcard is true for a local-context merge and false for a
+// full-context merge.
+//
+// # Local-Context Merges
+//
+// These local-context merge operations are used when rootIsWildcard is true:
+//
+//   - EMPTY is a superset of any graph: return EMPTY.
+//   - EMPTY and anything is EMPTY, so the merged parent is EMPTY: return the
+//     left graph. This is a special case of the last merge in a local context.
+//
+// # Full-Context Merges
+//
+// These full-context merge operations are used when rootIsWildcard is false.
+// We must keep all contexts; EMPTY in an array is a special value (with a
+// nil parent).
+func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext {
+ if rootIsWildcard {
+ if a.pcType == PredictionContextEmpty {
+			return BasePredictionContextEMPTY // * + b = *
+ }
+ if b.pcType == PredictionContextEmpty {
+			return BasePredictionContextEMPTY // a + * = *
+ }
+ } else {
+ if a.isEmpty() && b.isEmpty() {
+ return BasePredictionContextEMPTY // $ + $ = $
+		} else if a.isEmpty() { // $ + x = [x,$]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+		} else if b.isEmpty() { // x + $ = [x,$] ($ is always last if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []*PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
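+
+// Summarized as a table (derived from the cases above, writing * for the
+// wildcard root and $ for EMPTY):
+//
+//	rootIsWildcard   a     b     result
+//	true             $     any   $  (* + b = *)
+//	true             any   $     $  (a + * = *)
+//	false            $     $     $  ($ + $ = $)
+//	false            $     x     [x,$] array context
+//	false            x     $     [x,$] array context
+//	otherwise              	     nil (no root-case merge applies)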
+
+// mergeArrays merges two array [PredictionContext] instances. The cases are:
+//
+//   - Different tops, different parents.
+//   - Shared top, same parents.
+//   - Shared top, different parents.
+//   - Shared top, all shared parents.
+//   - Equal tops: merge parents and reduce the top to a singleton [PredictionContext].
+//
+//goland:noinspection GoBoolExpressions
+func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext {
+ if mergeCache != nil {
+ previous, present := mergeCache.Get(a, b)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ previous, present = mergeCache.Get(b, a)
+ if present {
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous")
+ }
+ return previous
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+			axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax -> ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+			j++ // but also skip one on the right side, since we merged
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.Put(a, b, pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation
+ if M.Equals(a) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, a)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a")
+ }
+ return a
+ }
+ if M.Equals(b) {
+ if mergeCache != nil {
+ mergeCache.Put(a, b, b)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b")
+ }
+ return b
+ }
+ combineCommonParents(&mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.Put(a, b, M)
+ }
+ if runtimeConfig.parserATNSimulatorTraceATNSim {
+ fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+ }
+ return M
+}
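+
+// A worked trace (derived from the loop above): merging return states
+// [1, 3] with [2, 3] walks both sorted arrays, copying 1 and 2 across and
+// merging the shared top 3, yielding merged return states [1, 2, 3].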
+
+// combineCommonParents makes a pass over all parents of M and merges any that are Equals().
+// Note that we pass a pointer to the slice as we want to modify it in place.
+//
+//goland:noinspection GoUnusedFunction
+func combineCommonParents(parents *[]*PredictionContext) {
+ uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
+
+ for p := 0; p < len(*parents); p++ {
+ parent := (*parents)[p]
+ _, _ = uniqueParents.Put(parent)
+ }
+ for q := 0; q < len(*parents); q++ {
+ pc, _ := uniqueParents.Get((*parents)[q])
+ (*parents)[q] = pc
+ }
+}
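+
+// After this pass, any positions that held Equals() parents share a single
+// *PredictionContext instance, which lets later comparisons and merges
+// short-circuit on pointer identity (see the a == b fast path in merge).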
+
+func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
+ if context.isEmpty() {
+ return context
+ }
+ existing, present := visited.Get(context)
+ if present {
+ return existing
+ }
+
+ existing, present = contextCache.Get(context)
+ if present {
+ visited.Put(context, existing)
+ return existing
+ }
+ changed := false
+ parents := make([]*PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || !parent.Equals(context.GetParent(i)) {
+ if !changed {
+ parents = make([]*PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited.Put(context, context)
+ return context
+ }
+ var updated *PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited.Put(updated, updated)
+ visited.Put(context, updated)
+
+ return updated
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
new file mode 100644
index 000000000..25dfb11e8
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
@@ -0,0 +1,48 @@
+package antlr
+
+var BasePredictionContextEMPTY = &PredictionContext{
+ cachedHash: calculateEmptyHash(),
+ pcType: PredictionContextEmpty,
+ returnState: BasePredictionContextEmptyReturnState,
+}
+
+// PredictionContextCache is used to cache [PredictionContext] objects. It holds the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+type PredictionContextCache struct {
+ cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ return &PredictionContextCache{
+ cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
+ }
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect shared cache from unsafe thread access.
+func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext {
+ if ctx.isEmpty() {
+ return BasePredictionContextEMPTY
+ }
+
+	// Lookup is done via Equals rather than pointer identity: if an equal entry
+	// is already present we return it, otherwise we add the new entry and return that.
+	//
+ existing, present := p.cache.Get(ctx)
+ if present {
+ return existing
+ }
+ p.cache.Put(ctx, ctx)
+ return ctx
+}
+
+func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) {
+ pc, exists := p.cache.Get(ctx)
+ return pc, exists
+}
+
+func (p *PredictionContextCache) length() int {
+ return p.cache.Len()
+}
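+
+// A minimal usage sketch (assuming only the declarations in this package):
+//
+//	cache := NewPredictionContextCache()
+//	ctx := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 42)
+//	canonical := cache.add(ctx) // first add stores and returns ctx itself
+//	found, ok := cache.Get(ctx) // ok == true; found is the canonical entry
+//	_, _, _ = canonical, found, ok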
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
new file mode 100644
index 000000000..3f85a6a52
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
@@ -0,0 +1,536 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ // PredictionModeSLL represents the SLL(*) prediction mode.
+ // This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the SLL prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful LL prediction abilities to complete successfully.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+
+ // PredictionModeLL represents the LL(*) prediction mode.
+ // This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+
+ // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode
+ // with exact ambiguity detection.
+ //
+ // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
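+
+// As a usage sketch (assuming the usual generated-parser API, in which the
+// parser exposes its ParserATNSimulator via GetInterpreter; the parser type
+// here is hypothetical):
+//
+//	p := NewMyGrammarParser(stream)
+//	p.GetInterpreter().SetPredictionMode(PredictionModeSLL)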
+
+// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+// # Combined SLL+LL Parsing
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative, e.g.
+//
+// {1,2} and {3,4}
+//
+// If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// # Heuristic
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+//	s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a
+// [DFA] state that looks like:
+//
+// [12|1|[], 6|2|[], 12|2|[]]
+//
+// Naturally
+//
+// 12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node, because alternative two has another way to continue,
+// via
+//
+// [6|2|[]]
+//
+// It also lets us continue for this rule:
+//
+// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state immediately before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// # Pure SLL Parsing
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// # Predicates in SLL+LL Parsing
+//
+// SLL decisions don't evaluate predicates until after they reach [DFA] stop
+// states because they need to create the [DFA] cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation, so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, [ATNConfigSet] combines stack contexts but not
+// semantic predicate contexts, so we might see two configurations like the
+// following:
+//
+// (s, 1, x, {}), (s, 1, x', {p})
+//
+// Before testing these configurations against others, we have to merge
+// x and x' (without modifying the existing configurations).
+// For example, we test (x+x') == x'' when looking for conflicts in
+// the following configurations:
+//
+//	(s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})
+//
+// If the configuration set has predicates (as indicated by
+// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of
+// the configurations to strip out all the predicates so that a standard
+// [ATNConfigSet] will merge everything ignoring predicates.
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool {
+
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input, so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+		// contexts if we can fail over to full LL; costs more time
+ // since we'll often fail over anyway.
+ if configs.hasSemanticContext {
+ // dup configs, tossing out semantic predicates
+ dup := NewATNConfigSet(false)
+ for _, c := range configs.configs {
+
+ // NewATNConfig({semanticContext:}, c)
+ c = NewATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar predicates
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if any configuration in the supplied configs is in a [RuleStopState]
+func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool {
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a
+// [RuleStopState]. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// The func returns true if all configurations in configs are in a
+// [RuleStopState]
+func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool {
+
+ for _, c := range configs.configs {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination.
+//
+// Can we stop looking ahead during [ATN] simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations 'C', into
+// conflicting subsets (s, _, ctx, _) and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical [ATNConfig].state and [ATNConfig].context values
+// but a different [ATNConfig].alt value, e.g.
+//
+// (s, i, ctx, _)
+//
+// and
+//
+// (s, j, ctx, _) ; for i != j
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// A_s,ctx = {i | (s, i, ctx, _)}
+//
+// for each configuration in C holding s and ctx fixed.
+//
+// Or in pseudo-code:
+//
+// for each configuration c in C:
+//	  map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+//
+// The values in map are the set of
+//
+// A_s,ctx
+//
+// sets.
+//
+// If
+//
+// |A_s,ctx| = 1
+//
+// then there is no conflict associated with s and ctx.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// further lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// # Conflicting Configs
+//
+// Two configurations:
+//
+// (s, i, x) and (s, j, x')
+//
+// conflict when i != j but x = x'. Because we merge all
+// (s, i, _) configurations together, that means that there are at
+// most n configurations associated with state s for
+// n possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts x and x'.
+//
+// Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either x or x'.
+// If the x associated with lowest alternative i
+// is the superset, then i is the only possible prediction since the
+// others resolve to min(i) as well. However, if x is
+// associated with j > i then at least one stack configuration for
+// j is not in conflict with alternative i. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between x and
+// x', which lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// # Continue/Stop Rule
+//
+// Continue if the union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives,
+//
+// [i for (_, i, _)]
+//
+// tells us which alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// # Cases
+//
+// - no conflicts and more than 1 alternative in set => continue
+// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set
+// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue
+//   - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s'', 1, z) yields non-conflicting set
+// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1
+// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets
+// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2}
+// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets
+// {1} ∪ {2} = {1,2} => continue
+// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets
+// {1} ∪ {3} = {1,3} => continue
+//
+// # Exact Ambiguity Detection
+//
+// If all states report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set:
+//
+// |A_i| > 1
+//
+// and
+//
+// A_i = A_j ; for all i, j
+//
+// In other words, we continue examining lookahead until all A_i
+// have more than one alternative and all A_i are the same. If
+//
+// A={{1,2}, {1,3}}
+//
+// then regular LL prediction would terminate because the resolved set is {1}.
+// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three, so we keep going. When we need exact ambiguity detection,
+// we can only stop prediction when the sets look like:
+//
+// A={{1,2}}
+//
+// or
+//
+// {{1,2},{1,2}}, etc...
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
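+
+// A tiny sketch of the termination test (assuming [BitSet] from this package):
+// two conflicting subsets that share minimum alternative 1 resolve to 1, so
+// prediction can stop even though each subset is ambiguous on its own.
+//
+//	s1, s2 := NewBitSet(), NewBitSet()
+//	s1.add(1)
+//	s1.add(2)
+//	s2.add(1)
+//	s2.add(3)
+//	alt := PredictionModeresolvesToJustOneViableAlt([]*BitSet{s1, s2}) // alt == 1
+//	_ = alt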
+
+// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more
+// than one alternative.
+//
+// The func returns true if every [BitSet] in altsets has a
+// cardinality greater than 1.
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains
+// exactly one alternative.
+//
+// The func returns true if altsets contains at least one [BitSet] with
+// a cardinality of exactly 1.
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains
+// more than one alternative.
+//
+// The func returns true if altsets contains a [BitSet] with a
+// cardinality greater than 1, otherwise false.
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent.
+//
+// The func returns true if every member of altsets is equal to the others.
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+		} else if !alts.equals(first) {
+			// Compare set contents rather than pointer identity: two distinct
+			// BitSet instances holding the same alternatives are equal subsets.
+ return false
+ }
+ }
+
+ return true
+}
+
+// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in
+// altsets. If no such alternative exists, this method returns
+// [ATNInvalidAltNumber].
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each [BitSet]
+// in altsets, being the set of represented alternatives in altsets.
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set.
+//
+// for each configuration c in configs:
+// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred
+func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet {
+ configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()")
+
+ for _, c := range configs.configs {
+
+ alts, ok := configToAlts.Get(c)
+ if !ok {
+ alts = NewBitSet()
+ configToAlts.Put(c, alts)
+ }
+ alts.add(c.GetAlt())
+ }
+
+ return configToAlts.Values()
+}
+
+// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set.
+//
+// for each configuration c in configs:
+//	  map[c.ATNConfig.state] U= c.ATNConfig.alt
+func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.configs {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets
+// if there is one.
+//
+// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
new file mode 100644
index 000000000..2e0b504fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+ HasError() bool
+ GetError() RecognitionException
+ SetError(RecognitionException)
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+ SynErr RecognitionException
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var tokenTypeMapCache = make(map[string]int)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.12.0"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) SetError(err RecognitionException) {
+ b.SynErr = err
+}
+
+func (b *BaseRecognizer) HasError() bool {
+ return b.SynErr != nil
+}
+
+func (b *BaseRecognizer) GetError() RecognitionException {
+ return b.SynErr
+}
+
+func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// GetRuleIndexMap gets a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed.
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+// GetTokenType gets the token type based upon its name
+func (b *BaseRecognizer) GetTokenType(_ string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// GetErrorHeader returns the error header, normally line/character position information.
+//
+// Can be overridden in sub structs embedding BaseRecognizer.
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// GetTokenErrorDisplay shows how a token should be displayed in an error message.
+//
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new Java type.
+//
+// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of [ANTLRErrorStrategy] may provide a similar
+// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay()
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+		return "<no token>"
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
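+
+// For example (a sketch, not a normative contract): a token whose text
+// contains a tab is rendered with the tab escaped as \t, and a token with
+// empty text and type TokenEOF is rendered as '<EOF>'.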
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// Sempred embedding structs need to override this if there are sempreds or actions
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool {
+ return true
+}
+
+// Precpred embedding structs need to override this if there are preceding predicates
+// that the ATN interpreter needs to execute
+func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
new file mode 100644
index 000000000..f2ad04793
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// RuleContext is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack.
+//
+// We actually carry no information about the rule associated with this context (except
+// when parsing). We keep only the state number of the invoking state from
+// the [ATN] submachine that invoked this. Contrast this with the s
+// pointer inside [ParserRuleContext] that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the struct
+// [ParserRuleContext], which embeds a RuleContext.
+//
+// See also [ParserRuleContext].
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
similarity index 92%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
rename to vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
index a702e99de..68cb9061e 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
@@ -9,14 +9,13 @@ import (
"strconv"
)
-// A tree structure used to record the semantic context in which
-// an ATN configuration is valid. It's either a single predicate,
-// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+// SemanticContext is a tree structure used to record the semantic context in which
//
-// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction p1 && p2, or a sum of products p1 || p2.
//
-
+// I have scoped the AND, OR, and Predicate subclasses of
+// [SemanticContext] within the scope of this outer "class"
type SemanticContext interface {
Equals(other Collectable[SemanticContext]) bool
Hash() int
@@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
var SemanticContextNone = NewPredicate(-1, -1, false)
-func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext {
return p
}
@@ -198,7 +197,7 @@ type AND struct {
func NewAND(a, b SemanticContext) *AND {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands")
if aa, ok := a.(*AND); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- for i, v := range vs {
- opnds[i] = v.(SemanticContext)
- }
+ copy(opnds, vs)
and := new(AND)
and.opnds = opnds
@@ -316,12 +313,12 @@ func (a *AND) Hash() int {
return murmurFinish(h, len(a.opnds))
}
-func (a *OR) Hash() int {
- h := murmurInit(41) // Init with a value different from AND
- for _, op := range a.opnds {
+func (o *OR) Hash() int {
+	h := murmurInit(41) // Init with a value different from AND
+ for _, op := range o.opnds {
h = murmurUpdate(h, op.Hash())
}
- return murmurFinish(h, len(a.opnds))
+ return murmurFinish(h, len(o.opnds))
}
func (a *AND) String() string {
@@ -349,7 +346,7 @@ type OR struct {
func NewOR(a, b SemanticContext) *OR {
- operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst)
+ operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands")
if aa, ok := a.(*OR); ok {
for _, o := range aa.opnds {
operands.Put(o)
@@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR {
vs := operands.Values()
opnds := make([]SemanticContext, len(vs))
- for i, v := range vs {
- opnds[i] = v.(SemanticContext)
- }
+ copy(opnds, vs)
o := new(OR)
o.opnds = opnds
diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
new file mode 100644
index 000000000..70c0673a0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go
@@ -0,0 +1,281 @@
+//go:build antlr.stats
+
+package antlr
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// This file allows the user to collect statistics about the runtime behavior of the ANTLR runtime. It is not enabled by default
+// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag.
+//
+
+// Tells various components to collect statistics - because it is only true when this file is included, it will
+// allow the compiler to completely eliminate all the code that is only used when collecting statistics.
+const collectStats = true
+
+// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run.
+// It is exposed through the exported Statistics variable so that it can be used to look for things that are not
+// already covered by the runtime statistics.
+type goRunStats struct {
+
+ // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created
+	// during a run. It is kept so that it can be mined for things that are not already looked for
+	// within this package.
+ //
+ jStats []*JStatRec
+ jStatsLock sync.RWMutex
+ topN int
+ topNByMax []*JStatRec
+ topNByUsed []*JStatRec
+ unusedCollections map[CollectionSource]int
+ counts map[CollectionSource]int
+}
+
+const (
+ collectionsFile = "collections"
+)
+
+var (
+ Statistics = &goRunStats{
+ topN: 10,
+ }
+)
+
+type statsOption func(*goRunStats) error
+
+// Configure allows the statistics system to be configured as the user wants and override the defaults
+func (s *goRunStats) Configure(options ...statsOption) error {
+ for _, option := range options {
+ err := option(s)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WithTopN sets the number of things to list in the report when we are concerned with the top N things.
+//
+// For example, if you want to see the top 20 collections by size, you can do:
+//
+// antlr.Statistics.Configure(antlr.WithTopN(20))
+func WithTopN(topN int) statsOption {
+ return func(s *goRunStats) error {
+ s.topN = topN
+ return nil
+ }
+}
+
+// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user.
+//
+// The function gathers and analyzes a number of statistics about any particular run of
+// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only
+// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be
+// especially useful in tracking down bugs or performance problems when an ANTLR user could
+// supply the output from this package, but cannot supply the grammar file(s) they are using, even
+// privately to the maintainers.
+//
+// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user
+// must call this function themselves to analyze the statistics. This is because none of the infrastructure is
+// extant unless the calling program is built with the antlr.stats tag like so:
+//
+// go build -tags antlr.stats .
+//
+// When a program is built with the antlr.stats tag, the Statistics object is created and available outside
+// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the
+// [Statistics.Report] function to report the statistics.
+//
+// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send them to
+// me [Jim Idle] directly at jimi@idle.ws
+//
+// [Jim Idle]: https://github.com/jim-idle
+func (s *goRunStats) Analyze() {
+
+ // Look for anything that looks strange and record it in our local maps etc for the report to present it
+ //
+ s.CollectionAnomalies()
+ s.TopNCollections()
+}
+
+// TopNCollections looks through all the statistical records and gathers the top ten collections by size.
+func (s *goRunStats) TopNCollections() {
+
+ // Let's sort the stat records by MaxSize
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].MaxSize > s.jStats[j].MaxSize
+ })
+
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByMax = append(s.topNByMax, s.jStats[i])
+ }
+
+ // Sort by the number of times used
+ //
+ sort.Slice(s.jStats, func(i, j int) bool {
+ return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts
+ })
+ for i := 0; i < len(s.jStats) && i < s.topN; i++ {
+ s.topNByUsed = append(s.topNByUsed, s.jStats[i])
+ }
+}
+
+// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output
+// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be
+// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix.
+func (s *goRunStats) Report(dir string, prefix string) error {
+
+ isDir, err := isDirectory(dir)
+ switch {
+ case err != nil:
+ return err
+ case !isDir:
+ return fmt.Errorf("output directory `%s` is not a directory", dir)
+ }
+ s.reportCollections(dir, prefix)
+
+ // Clean out any old data in case the user forgets
+ //
+ s.Reset()
+ return nil
+}
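+
+// An end-to-end sketch (assuming a program built with -tags antlr.stats and
+// an existing output directory):
+//
+//	Statistics.Analyze()
+//	if err := Statistics.Report("/tmp/antlr-stats", "myparser"); err != nil {
+//		log.Fatal(err)
+//	}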
+
+func (s *goRunStats) Reset() {
+ s.jStats = nil
+ s.topNByUsed = nil
+ s.topNByMax = nil
+}
+
+func (s *goRunStats) reportCollections(dir, prefix string) {
+ cname := filepath.Join(dir, ".asciidoctor")
+ // If the file doesn't exist, create it, or append to the file
+ f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ _, _ = f.WriteString(`// .asciidoctorconfig
+++++
+
+++++`)
+ _ = f.Close()
+
+ fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc")
+ // If the file doesn't exist, create it, or append to the file
+ f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func(f *os.File) {
+ err := f.Close()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }(f)
+ _, _ = f.WriteString("= Collections for " + prefix + "\n\n")
+
+ _, _ = f.WriteString("== Summary\n")
+
+ if s.unusedCollections != nil {
+ _, _ = f.WriteString("=== Unused Collections\n")
+		_, _ = f.WriteString("Unused collections incur an allocation penalty, which makes them candidates for either\n")
+		_, _ = f.WriteString(" removal or optimization. If a collection is allocated but never used, you should\n")
+		_, _ = f.WriteString(" consider removing it. If a collection is used, but only rarely, you should consider\n")
+		_, _ = f.WriteString(" using lazy initialization to defer the allocation until it is\n")
+		_, _ = f.WriteString(" actually needed.\n\n")
+
+ _, _ = f.WriteString("\n.Unused collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+
+ for k, v := range s.unusedCollections {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+		_, _ = f.WriteString("|===\n\n")
+ }
+
+ _, _ = f.WriteString("\n.Summary of Collections\n")
+ _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Type | Count\n")
+ for k, v := range s.counts {
+ _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n")
+ }
+ _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n")
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n")
+ for _, c := range s.topNByMax {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+
+ _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n")
+ _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n")
+ _, _ = f.WriteString("|===\n")
+ _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n")
+ for _, c := range s.topNByUsed {
+ _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n")
+ _, _ = f.WriteString("| " + c.Description + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n")
+ _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n")
+ _, _ = f.WriteString("\n")
+ }
+ _, _ = f.WriteString("|===\n\n")
+}
+
+// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when the antlr.stats build tag is enabled.
+func (s *goRunStats) AddJStatRec(rec *JStatRec) {
+ s.jStatsLock.Lock()
+ defer s.jStatsLock.Unlock()
+ s.jStats = append(s.jStats, rec)
+}
+
+// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found.
+func (s *goRunStats) CollectionAnomalies() {
+ s.jStatsLock.RLock()
+ defer s.jStatsLock.RUnlock()
+ s.counts = make(map[CollectionSource]int, len(s.jStats))
+ for _, c := range s.jStats {
+
+		// Accumulate raw counts
+ //
+ s.counts[c.Source]++
+
+ // Look for allocated but unused collections and count them
+ if c.MaxSize == 0 && c.Puts == 0 {
+ if s.unusedCollections == nil {
+ s.unusedCollections = make(map[CollectionSource]int)
+ }
+ s.unusedCollections[c.Source]++
+ }
+ if c.MaxSize > 6000 {
+ fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar")
+ }
+ }
+
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
new file mode 100644
index 000000000..4d9eb94e5
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go
@@ -0,0 +1,23 @@
+package antlr
+
+// A JStatRec is a record of a particular use of a [JStore], [JMap] or [JPCMap] collection. Typically, it will be
+// used to look for unused collections that were allocated anyway, problems with hash bucket clashes, and anomalies
+// such as huge numbers of Gets with no entries found (GetNoEnt). You can refer to the CollectionAnomalies() function
+// for ideas on what can be gleaned from these statistics about collections.
+type JStatRec struct {
+ Source CollectionSource
+ MaxSize int
+ CurSize int
+ Gets int
+ GetHits int
+ GetMisses int
+ GetHashConflicts int
+ GetNoEnt int
+ Puts int
+ PutHits int
+ PutMisses int
+ PutHashConflicts int
+ MaxSlotSize int
+ Description string
+ CreateStack []byte
+}
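+
+// A hedged in-package sketch of how a collection might record one of these
+// records when the antlr.stats build tag is active (the Source value and
+// Description are illustrative only):
+//
+//	rec := &JStatRec{
+//		Source:      CollectionSource(1), // assumption: a valid CollectionSource
+//		Description: "example DFA state set",
+//	}
+//	rec.Gets++
+//	rec.GetHits++
+//	Statistics.AddJStatRec(rec)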
diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go
new file mode 100644
index 000000000..9670efb82
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/token.go
@@ -0,0 +1,213 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+// TokenSourceCharStreamPair bundles a [TokenSource] with the [CharStream] from
+// which its tokens are drawn.
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+
+ String() string
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // TokenDefaultChannel is the default channel upon which tokens are sent to the parser.
+ //
+ // All tokens go to the parser (unless [Skip] is called in the lexer rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+ TokenDefaultChannel = 0
+
+	// TokenHiddenChannel defines the normal hidden channel - the parser will not see tokens that are not on [TokenDefaultChannel].
+	//
+	// Anything on a different channel than TokenDefaultChannel is not parsed by the parser.
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := &CommonToken{
+ BaseToken: BaseToken{
+ source: source,
+ tokenType: tokenType,
+ channel: channel,
+ start: start,
+ stop: stop,
+ tokenIndex: -1,
+ },
+ }
+
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
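+
+// An in-package construction sketch (the fields of TokenSourceCharStreamPair
+// are unexported, so this only works inside the antlr package; lex and input
+// stand for an existing TokenSource and CharStream):
+//
+//	pair := &TokenSourceCharStreamPair{tokenSource: lex, charStream: input}
+//	tok := NewCommonToken(pair, TokenMinUserTokenType, TokenDefaultChannel, 0, 4)
+//	_ = tok.GetText() // text is sliced from the input stream via start/stop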
+
+// clone constructs a new CommonToken as a copy of this Token.
+//
+// The new token shares the source pair of the original, and the token index,
+// line, column, and text are copied across via the accessor methods.
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go
rename to vendor/github.com/antlr4-go/antlr/v4/token_source.go
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
similarity index 90%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
rename to vendor/github.com/antlr4-go/antlr/v4/token_stream.go
index 1527d43f6..bf4ff6633 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go
@@ -8,13 +8,14 @@ type TokenStream interface {
IntStream
LT(k int) Token
+ Reset()
Get(index int) Token
GetTokenSource() TokenSource
SetTokenSource(TokenSource)
GetAllText() string
- GetTextFromInterval(*Interval) string
+ GetTextFromInterval(Interval) string
GetTextFromRuleContext(RuleContext) string
GetTextFromTokens(Token, Token) string
}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
new file mode 100644
index 000000000..ccf59b465
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
@@ -0,0 +1,662 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// TokenStreamRewriter is useful for rewriting a buffered input token stream after
+// doing some augmentation or other manipulation on it.
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily - only if you convert the buffer to a string with GetText.
+// This is very efficient because you are not moving data around all the time.
+// As the buffer of tokens is converted to strings, the GetText method(s) scan
+// the input token stream and check to see if there is an operation at the
+// current index. If so, the operation is done and then normal string rendering
+// continues on the buffer. This is like having multiple Turing machine
+// instruction streams (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream index will return the same value before and after any GetText call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling GetText
+// halfway through the input will only do rewrites for those tokens in the first
+// half of the file.
+//
+// Since the operations are done lazily at GetText-time, operations do not screw
+// up the token index values. That is, an insert operation at token index i does
+// not change the index values for tokens i+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example (a sketch
+// assuming a generated TLexer/TParser pair, as in the original Java example):
+//
+//	input := antlr.NewInputStream("input")
+//	lex := NewTLexer(input)
+//	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
+//	parser := NewTParser(tokens)
+//	rewriter := antlr.NewTokenStreamRewriter(tokens)
+//	parser.StartRule()
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	var t, u antlr.Token
+//	...
+//	rewriter.InsertAfterDefault(t.GetTokenIndex(), "text to put after t")
+//	rewriter.InsertAfterDefault(u.GetTokenIndex(), "text after u")
+//	fmt.Println(rewriter.GetTextDefault())
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file - all from the same buffer:
+//
+//	rewriter.InsertAfter("pass1", t.GetTokenIndex(), "text to put after t")
+//	rewriter.InsertAfter("pass2", u.GetTokenIndex(), "text after u")
+//	fmt.Println(rewriter.GetText("pass1", antlr.NewInterval(0, tokens.Size()-1)))
+//	fmt.Println(rewriter.GetText("pass2", antlr.NewInterval(0, tokens.Size()-1)))
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
+const (
+ DefaultProgramName = "default"
+ ProgramInitSize = 100
+ MinTokenIndex = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+	// Current index in the rewrites list
+	instructionIndex int
+	// Token buffer index
+	index int
+	// Substitution text
+	text string
+	// Actual operation name
+	opName string
+	// Pointer to token stream
+	tokens TokenStream
+}
+
+func (op *BaseRewriteOperation) GetInstructionIndex() int {
+ return op.instructionIndex
+}
+
+func (op *BaseRewriteOperation) GetIndex() int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) GetText() string {
+ return op.text
+}
+
+func (op *BaseRewriteOperation) GetOpName() string {
+ return op.opName
+}
+
+func (op *BaseRewriteOperation) GetTokens() TokenStream {
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation) SetInstructionIndex(val int) {
+ op.instructionIndex = val
+}
+
+func (op *BaseRewriteOperation) SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation) SetText(val string) {
+ op.text = val
+}
+
+func (op *BaseRewriteOperation) SetOpName(val string) {
+ op.opName = val
+}
+
+func (op *BaseRewriteOperation) SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int {
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+	// Note: the second argument is a Token, so it is formatted with %v rather than %d.
+	return fmt.Sprintf("<%s@%v:\"%s\">",
+		op.opName,
+		op.tokens.Get(op.GetIndex()),
+		op.text,
+	)
+}
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp {
+ return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{
+ index: index,
+ text: text,
+ opName: "InsertBeforeOp",
+ tokens: stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// InsertAfterOp distinguishes between insert-after and insert-before so that the
+// "insert after" instructions are executed first, followed by the "insert before"
+// instructions at the same index. "Insert after" is implemented as "insert before index+1".
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp {
+	return &InsertAfterOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:  index + 1, // insert after is implemented as insert before index+1
+			text:   text,
+			opName: "InsertAfterOp",
+			tokens: stream,
+		},
+	}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF {
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index + 1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// ReplaceOp replaces the range of tokens from index x through y with the given
+// text, recording the whole (y-x)+1 token range as a single instruction.
+type ReplaceOp struct {
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation: BaseRewriteOperation{
+ index: from,
+ text: text,
+ opName: "ReplaceOp",
+ tokens: stream,
+ },
+ LastIndex: to,
+ }
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+ if op.text != "" {
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+	// Our source stream
+	tokens TokenStream
+	// You may have multiple, named streams of rewrite operations.
+	// I'm calling these things "programs."
+	// Maps program name → list of rewrite operations.
+	programs                map[string][]RewriteOperation
+	lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+ },
+ lastRewriteTokenIndexes: map[string]int{},
+ }
+}
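+
+// A short end-to-end sketch, assuming tokens is a *CommonTokenStream that has
+// already been filled by a lexer:
+//
+//	rw := NewTokenStreamRewriter(tokens)
+//	rw.InsertBeforeDefault(0, "// header\n")
+//	rw.ReplaceDefaultPos(2, "replacement") // queue a single-token replace
+//	rw.DeleteDefaultPos(3)                 // deletion is a replace with ""
+//	out := rw.GetTextDefault()             // all rewrites are applied lazily here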
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+ return tsr.tokens
+}
+
+// Rollback rolls back the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+ is, ok := tsr.programs[programName]
+ if ok {
+ tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+ tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram resets the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+	tsr.Rollback(programName, MinTokenIndex) // TODO: double-check this, because the lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+ tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+ tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+ tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+ tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+ tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text string) {
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() {
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(programName)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) {
+ tsr.Replace(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) {
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) {
+ tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) {
+ tsr.ReplaceToken(DefaultProgramName, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) {
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) {
+ tsr.Replace(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) {
+ tsr.Delete(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) {
+ tsr.DeleteDefault(index, index)
+}
+
+func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) {
+ tsr.ReplaceToken(programName, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) {
+ tsr.DeleteToken(DefaultProgramName, from, to)
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int {
+ i, ok := tsr.lastRewriteTokenIndexes[programName]
+ if !ok {
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int {
+ return tsr.GetLastRewriteTokenIndex(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) {
+ tsr.lastRewriteTokenIndexes[programName] = i
+}
+
+func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation {
+ is := make([]RewriteOperation, 0, ProgramInitSize)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) {
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok {
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+
+// GetTextDefault returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetTextDefault() string {
+ return tsr.GetText(
+ DefaultProgramName,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+
+// GetText returns the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
+ rewrites := tsr.programs[programName]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start, 0)
+ if len(rewrites) == 0 {
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i := start; i <= stop && i < tsr.tokens.Size(); {
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil {
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {
+ buf.WriteString(t.GetText())
+ }
+ i++ // move to next token
+ } else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer
+ // So, if they did an insertAfter(lastValidIndex, "foo"), include
+ // foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1 {
+ // Scan any remaining operations after last token
+ // should be included (they will be inserts).
+ for _, op := range indexToOp {
+ if op.GetIndex() >= tsr.tokens.Size()-1 {
+ buf.WriteString(op.GetText())
+ }
+ }
+ }
+ return buf.String()
+}
+
+// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined, etc.
+//
+// Here are the cases:
+//
+// I.i.u I.j.v leave alone, non-overlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this 'replace'.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the 'replace' range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// The func returns a map from token index to operation.
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation {
+ // WALK REPLACES
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ rop, ok := op.(*ReplaceOp)
+ if !ok {
+ continue
+ }
+ // Wipe prior inserts within range
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if iop.index == rop.index {
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instructionIndex] = nil
+ if rop.text != "" {
+ rop.text = iop.text + rop.text
+ } else {
+ rop.text = iop.text
+ }
+ } else if iop.index > rop.index && iop.index <= rop.LastIndex {
+ // delete insert as it's a no-op.
+ rewrites[iop.instructionIndex] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok {
+ if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex {
+ // delete replace as it's a no-op.
+ rewrites[prevop.instructionIndex] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint {
+ rewrites[prevop.instructionIndex] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ } else if !disjoint {
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok {
+ continue
+ }
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok {
+ if nextIop.index == iop.GetIndex() {
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok {
+ if prevIop.index == iop.GetIndex() {
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instructionIndex] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j := 0; j < i && j < len(rewrites); j++ {
+ if rop, ok := rewrites[j].(*ReplaceOp); ok {
+ if iop.GetIndex() == rop.index {
+ rop.text = iop.GetText() + rop.text
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex {
+ panic("insert op " + iop.String() + " within boundaries of previous " + rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i := 0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil {
+ continue
+ }
+ if _, ok := m[op.GetIndex()]; ok {
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
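+
+// For example, queuing InsertBeforeDefault(2, "a") followed by
+// ReplaceDefault(2, 2, "b") reduces to a single ReplaceOp at index 2 with
+// text "ab": the insert is folded into the replace and then dropped.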
+
+/*
+ Quick fixing Go lack of overloads
+*/
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ } else {
+ return b
+ }
+}
+func min(a, b int) int {
+ if a < b {
+ return a
+ } else {
+ return b
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
similarity index 100%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go
rename to vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go
new file mode 100644
index 000000000..313b0fc12
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go
@@ -0,0 +1,439 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// the transitions can be fixed and shared. The DFA, on the other hand, updates the labels as it adds
+// transitions to its states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(_, _, _ int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// AtomTransition
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+ t := &AtomTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionATOM,
+ label: intervalSet,
+ isEpsilon: false,
+ },
+ }
+ t.intervalSet = t.makeLabel()
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, _, _ int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ BaseTransition
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+ return &RuleTransition{
+ BaseTransition: BaseTransition{
+ target: ruleStart,
+ isEpsilon: true,
+ serializationType: TransitionRULE,
+ },
+ ruleIndex: ruleIndex,
+ precedence: precedence,
+ followState: followState,
+ }
+}
+
+func (t *RuleTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ BaseTransition
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+ return &EpsilonTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionEPSILON,
+ isEpsilon: true,
+ },
+ outermostPrecedenceReturn: outermostPrecedenceReturn,
+ }
+}
+
+func (t *EpsilonTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ BaseTransition
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+ t := &RangeTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionRANGE,
+ isEpsilon: false,
+ },
+ start: start,
+ stop: stop,
+ }
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, _, _ int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+ return &BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ },
+ }
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ BaseAbstractPredicateTransition
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+ return &PredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPREDICATE,
+ isEpsilon: true,
+ },
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ predIndex: predIndex,
+ }
+}
+
+func (t *PredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ BaseTransition
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+ return &ActionTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionACTION,
+ isEpsilon: true,
+ },
+ isCtxDependent: isCtxDependent,
+ ruleIndex: ruleIndex,
+ actionIndex: actionIndex,
+ }
+}
+
+func (t *ActionTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+ t := &SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionSET,
+ },
+ }
+
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, _, _ int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+ t := &NotSetTransition{
+ SetTransition: SetTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionNOTSET,
+ },
+ },
+ }
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+ return &WildcardTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionWILDCARD,
+ },
+ }
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ BaseAbstractPredicateTransition
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+ return &PrecedencePredicateTransition{
+ BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{
+ BaseTransition: BaseTransition{
+ target: target,
+ serializationType: TransitionPRECEDENCE,
+ isEpsilon: true,
+ },
+ },
+ precedence: precedence,
+ }
+}
+
+func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go
new file mode 100644
index 000000000..c288420fb
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+ GetSourceInterval() Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+ GetRuleContext() RuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil }
+
+// TODO: Implement this?
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+ return ""
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during re-synchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Walk performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, [EnterRule] is called before
+// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
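+
+// A hedged usage sketch, where MyListener is a hypothetical type embedding
+// BaseParseTreeListener and tree is a parse tree produced by a parser:
+//
+//	walker := NewParseTreeWalker() // or use ParseTreeWalkerDefault
+//	walker.Walk(&MyListener{}, tree)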
+
+// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule]
+// then by triggering the event specific to the given parse tree node
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event [ParseTreeListener].ExitEveryRule
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+//goland:noinspection GoUnusedGlobalVariable
+var ParseTreeWalkerDefault = NewParseTreeWalker()
+
+type IterativeParseTreeWalker struct {
+ *ParseTreeWalker
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewIterativeParseTreeWalker() *IterativeParseTreeWalker {
+ return new(IterativeParseTreeWalker)
+}
+
+func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ var stack []Tree
+ var indexStack []int
+ currentNode := t
+ currentIndex := 0
+
+ for currentNode != nil {
+ // pre-order visit
+ switch tt := currentNode.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ i.EnterRule(listener, currentNode.(RuleNode))
+ }
+ // Move down to first child, if exists
+ if currentNode.GetChildCount() > 0 {
+ stack = append(stack, currentNode)
+ indexStack = append(indexStack, currentIndex)
+ currentIndex = 0
+ currentNode = currentNode.GetChild(0)
+ continue
+ }
+
+ for {
+ // post-order visit
+ if ruleNode, ok := currentNode.(RuleNode); ok {
+ i.ExitRule(listener, ruleNode)
+ }
+ // No parent, so no siblings
+ if len(stack) == 0 {
+ currentNode = nil
+ currentIndex = 0
+ break
+ }
+ // Move to next sibling if possible
+ currentIndex++
+ if stack[len(stack)-1].GetChildCount() > currentIndex {
+ currentNode = stack[len(stack)-1].GetChild(currentIndex)
+ break
+ }
+			// No next sibling, so move up
+ currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1]
+ currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1]
+ }
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go
similarity index 81%
rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
rename to vendor/github.com/antlr4-go/antlr/v4/trees.go
index d7dbb0322..f44c05d81 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go
+++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go
@@ -8,10 +8,8 @@ import "fmt"
/** A set of utility routines useful for all kinds of ANTLR trees. */
-// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
-//
-// node payloads to get the text for the nodes. Detect
-// parse trees and extract data appropriately.
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the
+// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately.
func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
if recog != nil {
@@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
}
for i := 1; i < c; i++ {
s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
- res += (" " + s)
+ res += " " + s
}
res += ")"
return res
@@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
}
}
- // no recog for rule names
+	// no recognizer available for rule names
payload := t.GetPayload()
if p2, ok := payload.(Token); ok {
return p2.GetText()
@@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
return fmt.Sprint(t.GetPayload())
}
-// Return ordered list of all children of this node
+// TreesGetChildren returns an ordered list of all children of this node
+//
+//goland:noinspection GoUnusedExportedFunction
func TreesGetChildren(t Tree) []Tree {
list := make([]Tree, 0)
for i := 0; i < t.GetChildCount(); i++ {
@@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree {
return list
}
-// Return a list of all ancestors of this node. The first node of
+// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root
+// and the last node is the parent of this node.
//
-// list is the root and the last is the parent of this node.
+//goland:noinspection GoUnusedExportedFunction
func TreesgetAncestors(t Tree) []Tree {
ancestors := make([]Tree, 0)
t = t.GetParent()
@@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree {
return ancestors
}
+//goland:noinspection GoUnusedExportedFunction
func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
return TreesfindAllNodes(t, ttype, true)
}
+//goland:noinspection GoUnusedExportedFunction
func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
return TreesfindAllNodes(t, ruleIndex, false)
}
@@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr
}
}
+//goland:noinspection GoUnusedExportedFunction
func TreesDescendants(t ParseTree) []ParseTree {
nodes := []ParseTree{t}
for i := 0; i < t.GetChildCount(); i++ {
diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go
new file mode 100644
index 000000000..733d7df9d
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go
@@ -0,0 +1,328 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "os"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
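+
+// Example use:
+//
+//	var s IntStack
+//	s.Push(41)
+//	v, err := s.Pop() // v == 41, err == nil
+//	_, err = s.Pop()  // err == ErrEmptyStack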
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
+type BitSet struct {
+ data []uint64
+}
+
+// NewBitSet creates a new bitwise set
+// TODO: See if we can replace with the standard library's BitSet
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+ // Get min size necessary to represent the bits in both sets.
+ bLen := b.minLen()
+ setLen := set.minLen()
+ maxLen := intMax(bLen, setLen)
+ if maxLen > len(b.data) {
+ // Increase the size of len(b.data) to represent the bits in both sets.
+ data := make([]uint64, maxLen)
+ copy(data, b.data)
+ b.data = data
+ }
+ // len(b.data) is at least setLen.
+ for i := 0; i < setLen; i++ {
+ b.data[i] |= set.data[i]
+ }
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+	return 2147483647 // Java's Integer.MAX_VALUE; returned when no bit is set
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+	// We only compare set bits, so we cannot rely on the two slices having the same size. It's
+	// possible for two BitSets to have different slice lengths but the same set bits. So we only
+	// compare the relevant words and ignore the trailing zeros.
+ bLen := b.minLen()
+ otherLen := otherBitSet.minLen()
+
+ if bLen != otherLen {
+ return false
+ }
+
+ for i := 0; i < bLen; i++ {
+ if b.data[i] != otherBitSet.data[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// murmur hash
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
+
+func isDirectory(dir string) (bool, error) {
+ fileInfo, err := os.Stat(dir)
+ if err != nil {
+ switch {
+ case errors.Is(err, syscall.ENOENT):
+			// The given directory does not exist, so try to create it
+ err = os.MkdirAll(dir, 0755)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+ case err != nil:
+ return false, err
+ default:
+ }
+ }
+ return fileInfo.IsDir(), err
+}
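The `murmurInit`/`murmurUpdate`/`murmurFinish` helpers in the new utils.go form a MurmurHash3-style mixing chain, driven as init → one update per word → finish with the word count. They are unexported, so the sketch below re-declares minimal copies purely for illustration; the constants and rotations mirror the vendored code.

```go
package main

import "fmt"

// Standalone copies of the unexported helpers from utils.go above,
// reproduced only to show the init -> update* -> finish call pattern.
func murmurInit(seed int) int { return seed }

func murmurUpdate(h int, value int) int {
	const c1, c2 uint32 = 0xCC9E2D51, 0x1B873593
	k := uint32(value)
	k *= c1
	k = (k << 15) | (k >> 17) // rotate left by 15
	k *= c2
	hash := uint32(h) ^ k
	hash = (hash << 13) | (hash >> 19) // rotate left by 13
	return int(hash*5 + 0xE6546B64)
}

func murmurFinish(h int, numberOfWords int) int {
	hash := uint32(h)
	hash ^= uint32(numberOfWords) << 2 // the input length in bytes
	hash ^= hash >> 16
	hash *= 0x85ebca6b
	hash ^= hash >> 13
	hash *= 0xc2b2ae35
	hash ^= hash >> 16
	return int(hash)
}

func main() {
	// Seed, fold in each word, then finalize with the word count.
	h := murmurInit(7)
	words := []int{11, 42, 1024}
	for _, w := range words {
		h = murmurUpdate(h, w)
	}
	fmt.Printf("hash=%#x\n", murmurFinish(h, len(words)))
}
```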
diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore
deleted file mode 100644
index 8d69a9418..000000000
--- a/vendor/github.com/asaskevich/govalidator/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-bin/
-.idea/
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml
deleted file mode 100644
index bb83c6670..000000000
--- a/vendor/github.com/asaskevich/govalidator/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-dist: xenial
-go:
- - '1.10'
- - '1.11'
- - '1.12'
- - '1.13'
- - 'tip'
-
-script:
- - go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
deleted file mode 100644
index 4b462b0d8..000000000
--- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Contributor Code of Conduct
-
-This project adheres to [The Code Manifesto](http://codemanifesto.com)
-as its guidelines for contributor interactions.
-
-## The Code Manifesto
-
-We want to work in an ecosystem that empowers developers to reach their
-potential — one that encourages growth and effective collaboration. A space
-that is safe for all.
-
-A space such as this benefits everyone that participates in it. It encourages
-new developers to enter our field. It is through discussion and collaboration
-that we grow, and through growth that we improve.
-
-In the effort to create such a place, we hold to these values:
-
-1. **Discrimination limits us.** This includes discrimination on the basis of
- race, gender, sexual orientation, gender identity, age, nationality,
- technology and any other arbitrary exclusion of a group of people.
-2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort
- levels. Remember that, and if brought to your attention, heed it.
-3. **We are our biggest assets.** None of us were born masters of our trade.
- Each of us has been helped along the way. Return that favor, when and where
- you can.
-4. **We are resources for the future.** As an extension of #3, share what you
- know. Make yourself a resource to help those that come after you.
-5. **Respect defines us.** Treat others as you wish to be treated. Make your
- discussions, criticisms and debates from a position of respectfulness. Ask
- yourself, is it true? Is it necessary? Is it constructive? Anything less is
- unacceptable.
-6. **Reactions require grace.** Angry responses are valid, but abusive language
- and vindictive actions are toxic. When something happens that offends you,
- handle it assertively, but be respectful. Escalate reasonably, and try to
- allow the offender an opportunity to explain themselves, and possibly
- correct the issue.
-7. **Opinions are just that: opinions.** Each and every one of us, due to our
- background and upbringing, have varying opinions. That is perfectly
- acceptable. Remember this: if you respect your own opinions, you should
- respect the opinions of others.
-8. **To err is human.** You might not intend it, but mistakes do happen and
- contribute to build experience. Tolerate honest mistakes, and don't
- hesitate to apologize if you make one yourself.
diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
deleted file mode 100644
index 7ed268a1e..000000000
--- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
+++ /dev/null
@@ -1,63 +0,0 @@
-#### Support
-If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
-
-#### What to contribute
-If you don't know what to do, here are some features and functions that still need work:
-
-- [ ] Refactor code
-- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
-- [ ] Create an up-to-date list of contributors and projects that are currently using this package
-- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
-- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
-- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
-- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
-- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
-- [ ] Implement fuzzing testing
-- [ ] Implement some struct/map/array utilities
-- [ ] Implement map/array validation
-- [ ] Implement benchmarking
-- [ ] Implement batch of examples
-- [ ] Look at forks for new features and fixes
-
-#### Advice
-Feel free to create what you want, but keep in mind when you implement new features:
-- Code must be clear and readable, and the names of variables/constants must clearly describe what they are for
-- Public functions must be documented in the source file and added to the list of available functions in README.md
-- There must be unit tests for any new functions and improvements
-
-## Financial contributions
-
-We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
-Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
-
-
-## Credits
-
-
-### Contributors
-
-Thank you to all the people who have already contributed to govalidator!
-
-
-
-### Backers
-
-Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
-
-
-
-
-### Sponsors
-
-Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE
deleted file mode 100644
index cacba9102..000000000
--- a/vendor/github.com/asaskevich/govalidator/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014-2020 Alex Saskevich
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md
deleted file mode 100644
index 2c3fc35eb..000000000
--- a/vendor/github.com/asaskevich/govalidator/README.md
+++ /dev/null
@@ -1,622 +0,0 @@
-govalidator
-===========
-[Gitter](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [GoDoc](https://godoc.org/github.com/asaskevich/govalidator)
-[Build Status](https://travis-ci.org/asaskevich/govalidator)
-[Coverage](https://codecov.io/gh/asaskevich/govalidator) [Go Report Card](https://goreportcard.com/report/github.com/asaskevich/govalidator) [GoSearch](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [Backers on Open Collective](#backers) [Sponsors on Open Collective](#sponsors) [FOSSA Status](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
-
-A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
-
-#### Installation
-Make sure that Go is installed on your computer.
-Type the following command in your terminal:
-
- go get github.com/asaskevich/govalidator
-
-or you can get specified release of the package with `gopkg.in`:
-
- go get gopkg.in/asaskevich/govalidator.v10
-
-After that, the package is ready to use.
-
-
-#### Import package in your project
-Add the following line to your `*.go` file:
-```go
-import "github.com/asaskevich/govalidator"
-```
-If you don't want to type the long `govalidator` package name, you can alias the import:
-```go
-import (
- valid "github.com/asaskevich/govalidator"
-)
-```
-
-#### Activate behavior to require all fields have a validation tag by default
-`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
-
-`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to distinguish between `nil` and `zero value` states can use this. If disabled, both `nil` and `zero` values cause validation errors.
-
-```go
-import "github.com/asaskevich/govalidator"
-
-func init() {
- govalidator.SetFieldsRequiredByDefault(true)
-}
-```
-
-Here's some code to explain it:
-```go
-// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
-type exampleStruct struct {
- Name string ``
- Email string `valid:"email"`
-}
-
-// this, however, will only fail when Email is empty or an invalid email address:
-type exampleStruct2 struct {
- Name string `valid:"-"`
- Email string `valid:"email"`
-}
-
-// lastly, this will only fail when Email is an invalid email address but not when it's empty:
-type exampleStruct2 struct {
- Name string `valid:"-"`
- Email string `valid:"email,optional"`
-}
-```
-
-#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
-##### Custom validator function signature
-A context was added as the second parameter: for structs, this is the object being validated. This makes dependent validation possible.
-```go
-import "github.com/asaskevich/govalidator"
-
-// old signature
-func(i interface{}) bool
-
-// new signature
-func(i interface{}, o interface{}) bool
-```
-
-##### Adding a custom validator
-This was changed to prevent data races when accessing custom validators.
-```go
-import "github.com/asaskevich/govalidator"
-
-// before
-govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
- // ...
-}
-
-// after
-govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
- // ...
-})
-```
-
-#### List of functions:
-```go
-func Abs(value float64) float64
-func BlackList(str, chars string) string
-func ByteLength(str string, params ...string) bool
-func CamelCaseToUnderscore(str string) string
-func Contains(str, substring string) bool
-func Count(array []interface{}, iterator ConditionIterator) int
-func Each(array []interface{}, iterator Iterator)
-func ErrorByField(e error, field string) string
-func ErrorsByField(e error) map[string]string
-func Filter(array []interface{}, iterator ConditionIterator) []interface{}
-func Find(array []interface{}, iterator ConditionIterator) interface{}
-func GetLine(s string, index int) (string, error)
-func GetLines(s string) []string
-func HasLowerCase(str string) bool
-func HasUpperCase(str string) bool
-func HasWhitespace(str string) bool
-func HasWhitespaceOnly(str string) bool
-func InRange(value interface{}, left interface{}, right interface{}) bool
-func InRangeFloat32(value, left, right float32) bool
-func InRangeFloat64(value, left, right float64) bool
-func InRangeInt(value, left, right interface{}) bool
-func IsASCII(str string) bool
-func IsAlpha(str string) bool
-func IsAlphanumeric(str string) bool
-func IsBase64(str string) bool
-func IsByteLength(str string, min, max int) bool
-func IsCIDR(str string) bool
-func IsCRC32(str string) bool
-func IsCRC32b(str string) bool
-func IsCreditCard(str string) bool
-func IsDNSName(str string) bool
-func IsDataURI(str string) bool
-func IsDialString(str string) bool
-func IsDivisibleBy(str, num string) bool
-func IsEmail(str string) bool
-func IsExistingEmail(email string) bool
-func IsFilePath(str string) (bool, int)
-func IsFloat(str string) bool
-func IsFullWidth(str string) bool
-func IsHalfWidth(str string) bool
-func IsHash(str string, algorithm string) bool
-func IsHexadecimal(str string) bool
-func IsHexcolor(str string) bool
-func IsHost(str string) bool
-func IsIP(str string) bool
-func IsIPv4(str string) bool
-func IsIPv6(str string) bool
-func IsISBN(str string, version int) bool
-func IsISBN10(str string) bool
-func IsISBN13(str string) bool
-func IsISO3166Alpha2(str string) bool
-func IsISO3166Alpha3(str string) bool
-func IsISO4217(str string) bool
-func IsISO693Alpha2(str string) bool
-func IsISO693Alpha3b(str string) bool
-func IsIn(str string, params ...string) bool
-func IsInRaw(str string, params ...string) bool
-func IsInt(str string) bool
-func IsJSON(str string) bool
-func IsLatitude(str string) bool
-func IsLongitude(str string) bool
-func IsLowerCase(str string) bool
-func IsMAC(str string) bool
-func IsMD4(str string) bool
-func IsMD5(str string) bool
-func IsMagnetURI(str string) bool
-func IsMongoID(str string) bool
-func IsMultibyte(str string) bool
-func IsNatural(value float64) bool
-func IsNegative(value float64) bool
-func IsNonNegative(value float64) bool
-func IsNonPositive(value float64) bool
-func IsNotNull(str string) bool
-func IsNull(str string) bool
-func IsNumeric(str string) bool
-func IsPort(str string) bool
-func IsPositive(value float64) bool
-func IsPrintableASCII(str string) bool
-func IsRFC3339(str string) bool
-func IsRFC3339WithoutZone(str string) bool
-func IsRGBcolor(str string) bool
-func IsRegex(str string) bool
-func IsRequestURI(rawurl string) bool
-func IsRequestURL(rawurl string) bool
-func IsRipeMD128(str string) bool
-func IsRipeMD160(str string) bool
-func IsRsaPub(str string, params ...string) bool
-func IsRsaPublicKey(str string, keylen int) bool
-func IsSHA1(str string) bool
-func IsSHA256(str string) bool
-func IsSHA384(str string) bool
-func IsSHA512(str string) bool
-func IsSSN(str string) bool
-func IsSemver(str string) bool
-func IsTiger128(str string) bool
-func IsTiger160(str string) bool
-func IsTiger192(str string) bool
-func IsTime(str string, format string) bool
-func IsType(v interface{}, params ...string) bool
-func IsURL(str string) bool
-func IsUTFDigit(str string) bool
-func IsUTFLetter(str string) bool
-func IsUTFLetterNumeric(str string) bool
-func IsUTFNumeric(str string) bool
-func IsUUID(str string) bool
-func IsUUIDv3(str string) bool
-func IsUUIDv4(str string) bool
-func IsUUIDv5(str string) bool
-func IsULID(str string) bool
-func IsUnixTime(str string) bool
-func IsUpperCase(str string) bool
-func IsVariableWidth(str string) bool
-func IsWhole(value float64) bool
-func LeftTrim(str, chars string) string
-func Map(array []interface{}, iterator ResultIterator) []interface{}
-func Matches(str, pattern string) bool
-func MaxStringLength(str string, params ...string) bool
-func MinStringLength(str string, params ...string) bool
-func NormalizeEmail(str string) (string, error)
-func PadBoth(str string, padStr string, padLen int) string
-func PadLeft(str string, padStr string, padLen int) string
-func PadRight(str string, padStr string, padLen int) string
-func PrependPathToErrors(err error, path string) error
-func Range(str string, params ...string) bool
-func RemoveTags(s string) string
-func ReplacePattern(str, pattern, replace string) string
-func Reverse(s string) string
-func RightTrim(str, chars string) string
-func RuneLength(str string, params ...string) bool
-func SafeFileName(str string) string
-func SetFieldsRequiredByDefault(value bool)
-func SetNilPtrAllowedByRequired(value bool)
-func Sign(value float64) float64
-func StringLength(str string, params ...string) bool
-func StringMatches(s string, params ...string) bool
-func StripLow(str string, keepNewLines bool) string
-func ToBoolean(str string) (bool, error)
-func ToFloat(str string) (float64, error)
-func ToInt(value interface{}) (res int64, err error)
-func ToJSON(obj interface{}) (string, error)
-func ToString(obj interface{}) string
-func Trim(str, chars string) string
-func Truncate(str string, length int, ending string) string
-func TruncatingErrorf(str string, args ...interface{}) error
-func UnderscoreToCamelCase(s string) string
-func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error)
-func ValidateStruct(s interface{}) (bool, error)
-func WhiteList(str, chars string) string
-type ConditionIterator
-type CustomTypeValidator
-type Error
-func (e Error) Error() string
-type Errors
-func (es Errors) Error() string
-func (es Errors) Errors() []error
-type ISO3166Entry
-type ISO693Entry
-type InterfaceParamValidator
-type Iterator
-type ParamValidator
-type ResultIterator
-type UnsupportedTypeError
-func (e *UnsupportedTypeError) Error() string
-type Validator
-```
-
-#### Examples
-###### IsURL
-```go
-println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
-```
-###### IsType
-```go
-println(govalidator.IsType("Bob", "string"))
-println(govalidator.IsType(1, "int"))
-i := 1
-println(govalidator.IsType(&i, "*int"))
-```
-
-IsType can be used through the tag `type` which is essential for map validation:
-```go
-type User struct {
- Name string `valid:"type(string)"`
- Age int `valid:"type(int)"`
- Meta interface{} `valid:"type(string)"`
-}
-result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
-if err != nil {
- println("error: " + err.Error())
-}
-println(result)
-```
-###### ToString
-```go
-type User struct {
- FirstName string
- LastName string
-}
-
-str := govalidator.ToString(&User{"John", "Juan"})
-println(str)
-```
-###### Each, Map, Filter, Count for slices
-Each iterates over the slice/array and calls Iterator for every item
-```go
-data := []interface{}{1, 2, 3, 4, 5}
-var fn govalidator.Iterator = func(value interface{}, index int) {
- println(value.(int))
-}
-govalidator.Each(data, fn)
-```
-```go
-data := []interface{}{1, 2, 3, 4, 5}
-var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
- return value.(int) * 3
-}
-_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
-```
-```go
-data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
-var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
- return value.(int)%2 == 0
-}
-_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
-_ = govalidator.Count(data, fn) // result = 5
-```
-###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
-If you want to validate structs, you can use the tag `valid` for any field in your structure. All validators used with this field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
-```go
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
- return str == "duck"
-})
-```
-For completely custom validators (interface-based), see below.
-
-Here is a list of available validators for struct fields (validator - used function):
-```go
-"email": IsEmail,
-"url": IsURL,
-"dialstring": IsDialString,
-"requrl": IsRequestURL,
-"requri": IsRequestURI,
-"alpha": IsAlpha,
-"utfletter": IsUTFLetter,
-"alphanum": IsAlphanumeric,
-"utfletternum": IsUTFLetterNumeric,
-"numeric": IsNumeric,
-"utfnumeric": IsUTFNumeric,
-"utfdigit": IsUTFDigit,
-"hexadecimal": IsHexadecimal,
-"hexcolor": IsHexcolor,
-"rgbcolor": IsRGBcolor,
-"lowercase": IsLowerCase,
-"uppercase": IsUpperCase,
-"int": IsInt,
-"float": IsFloat,
-"null": IsNull,
-"uuid": IsUUID,
-"uuidv3": IsUUIDv3,
-"uuidv4": IsUUIDv4,
-"uuidv5": IsUUIDv5,
-"creditcard": IsCreditCard,
-"isbn10": IsISBN10,
-"isbn13": IsISBN13,
-"json": IsJSON,
-"multibyte": IsMultibyte,
-"ascii": IsASCII,
-"printableascii": IsPrintableASCII,
-"fullwidth": IsFullWidth,
-"halfwidth": IsHalfWidth,
-"variablewidth": IsVariableWidth,
-"base64": IsBase64,
-"datauri": IsDataURI,
-"ip": IsIP,
-"port": IsPort,
-"ipv4": IsIPv4,
-"ipv6": IsIPv6,
-"dns": IsDNSName,
-"host": IsHost,
-"mac": IsMAC,
-"latitude": IsLatitude,
-"longitude": IsLongitude,
-"ssn": IsSSN,
-"semver": IsSemver,
-"rfc3339": IsRFC3339,
-"rfc3339WithoutZone": IsRFC3339WithoutZone,
-"ISO3166Alpha2": IsISO3166Alpha2,
-"ISO3166Alpha3": IsISO3166Alpha3,
-"ulid": IsULID,
-```
-Validators with parameters
-
-```go
-"range(min|max)": Range,
-"length(min|max)": ByteLength,
-"runelength(min|max)": RuneLength,
-"stringlength(min|max)": StringLength,
-"matches(pattern)": StringMatches,
-"in(string1|string2|...|stringN)": IsIn,
-"rsapub(keylength)" : IsRsaPub,
-"minstringlength(int): MinStringLength,
-"maxstringlength(int): MaxStringLength,
-```
-Validators with parameters for any type
-
-```go
-"type(type)": IsType,
-```
-
-And here is a small example of usage:
-```go
-type Post struct {
- Title string `valid:"alphanum,required"`
- Message string `valid:"duck,ascii"`
- Message2 string `valid:"animal(dog)"`
- AuthorIP string `valid:"ipv4"`
- Date string `valid:"-"`
-}
-post := &Post{
- Title: "My Example Post",
- Message: "duck",
- Message2: "dog",
- AuthorIP: "123.234.54.3",
-}
-
-// Add your own struct validation tags
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
- return str == "duck"
-})
-
-// Add your own struct validation tags with parameter
-govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
- species := params[0]
- return str == species
-})
-govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
-
-result, err := govalidator.ValidateStruct(post)
-if err != nil {
- println("error: " + err.Error())
-}
-println(result)
-```
-###### ValidateMap [#2](https://github.com/asaskevich/govalidator/pull/338)
-If you want to validate maps, you can use the map to be validated and a validation map that contains the same tags used in ValidateStruct. Both maps have to be in the form `map[string]interface{}`.
-
-So here is a small example of usage:
-```go
-var mapTemplate = map[string]interface{}{
- "name":"required,alpha",
- "family":"required,alpha",
- "email":"required,email",
- "cell-phone":"numeric",
- "address":map[string]interface{}{
- "line1":"required,alphanum",
- "line2":"alphanum",
- "postal-code":"numeric",
- },
-}
-
-var inputMap = map[string]interface{}{
- "name":"Bob",
- "family":"Smith",
- "email":"foo@bar.baz",
- "address":map[string]interface{}{
- "line1":"",
- "line2":"",
- "postal-code":"",
- },
-}
-
-result, err := govalidator.ValidateMap(inputMap, mapTemplate)
-if err != nil {
- println("error: " + err.Error())
-}
-println(result)
-```
-
-###### WhiteList
-```go
-// Remove all characters from the string except those between "a" and "z"
-println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
-```
-
-###### Custom validation functions
-Custom validation using your own domain specific validators is also available - here's an example of how to use it:
-```go
-import "github.com/asaskevich/govalidator"
-
-type CustomByteArray [6]byte // custom types are supported and can be validated
-
-type StructWithCustomByteArray struct {
- ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
- Email string `valid:"email"`
- CustomMinLength int `valid:"-"`
-}
-
-govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
- switch v := context.(type) { // you can type switch on the context interface being validated
- case StructWithCustomByteArray:
- // you can check and validate against some other field in the context,
- // return early or not validate against the context at all – your choice
- case SomeOtherType:
- // ...
- default:
- // expecting some other type? Throw/panic here or continue
- }
-
- switch v := i.(type) { // type switch on the struct field being validated
- case CustomByteArray:
- for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
- if e != 0 {
- return true
- }
- }
- }
- return false
-})
-govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
- switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
- case StructWithCustomByteArray:
- return len(v.ID) >= v.CustomMinLength
- }
- return false
-})
-```
-
-###### Loop over Error()
-By default .Error() returns all errors in a single String. To access each error you can do this:
-```go
- if err != nil {
- errs := err.(govalidator.Errors).Errors()
- for _, e := range errs {
- fmt.Println(e.Error())
- }
- }
-```
-
-###### Custom error messages
-Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
-```go
-type Ticket struct {
- Id int64 `json:"id"`
- FirstName string `json:"firstname" valid:"required~First name is blank"`
-}
-```
-
-#### Notes
-Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
-Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
-
-#### Support
-If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
-
-#### What to contribute
-If you don't know what to do, here are some features and functions that still need work:
-
-- [ ] Refactor code
-- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
-- [ ] Create an up-to-date list of contributors and projects that are currently using this package
-- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
-- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
-- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
-- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
-- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
-- [ ] Implement fuzzing testing
-- [ ] Implement some struct/map/array utilities
-- [ ] Implement map/array validation
-- [ ] Implement benchmarking
-- [ ] Implement batch of examples
-- [ ] Look at forks for new features and fixes
-
-#### Advice
-Feel free to create what you want, but keep in mind when you implement new features:
-- Code must be clear and readable, and the names of variables/constants must clearly describe what they are for
-- Public functions must be documented in the source file and added to the list of available functions in README.md
-- There must be unit tests for any new functions and improvements
-
-## Credits
-### Contributors
-
-This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
-
-#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
-* [Daniel Lohse](https://github.com/annismckenzie)
-* [Attila Oláh](https://github.com/attilaolah)
-* [Daniel Korner](https://github.com/Dadie)
-* [Steven Wilkin](https://github.com/stevenwilkin)
-* [Deiwin Sarjas](https://github.com/deiwin)
-* [Noah Shibley](https://github.com/slugmobile)
-* [Nathan Davies](https://github.com/nathj07)
-* [Matt Sanford](https://github.com/mzsanford)
-* [Simon ccl1115](https://github.com/ccl1115)
-
-
-
-
-### Backers
-
-Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
-
-
-
-
-### Sponsors
-
-Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-## License
-[FOSSA Status](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go
deleted file mode 100644
index 3e1da7cb4..000000000
--- a/vendor/github.com/asaskevich/govalidator/arrays.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package govalidator
-
-// Iterator is the function that accepts element of slice/array and its index
-type Iterator func(interface{}, int)
-
-// ResultIterator is the function that accepts element of slice/array and its index and returns any result
-type ResultIterator func(interface{}, int) interface{}
-
-// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
-type ConditionIterator func(interface{}, int) bool
-
-// ReduceIterator is the function that accepts two elements of a slice/array and returns the result of merging those values
-type ReduceIterator func(interface{}, interface{}) interface{}
-
-// Some validates that any item of array corresponds to ConditionIterator. Returns boolean.
-func Some(array []interface{}, iterator ConditionIterator) bool {
- res := false
- for index, data := range array {
- res = res || iterator(data, index)
- }
- return res
-}
-
-// Every validates that every item of array corresponds to ConditionIterator. Returns boolean.
-func Every(array []interface{}, iterator ConditionIterator) bool {
- res := true
- for index, data := range array {
- res = res && iterator(data, index)
- }
- return res
-}
-
-// Reduce boils down a list of values into a single value by ReduceIterator
-func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
- for _, data := range array {
- initialValue = iterator(initialValue, data)
- }
- return initialValue
-}
-
-// Each iterates over the slice and applies Iterator to every item
-func Each(array []interface{}, iterator Iterator) {
- for index, data := range array {
- iterator(data, index)
- }
-}
-
-// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
-func Map(array []interface{}, iterator ResultIterator) []interface{} {
- var result = make([]interface{}, len(array))
- for index, data := range array {
- result[index] = iterator(data, index)
- }
- return result
-}
-
-// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
-func Find(array []interface{}, iterator ConditionIterator) interface{} {
- for index, data := range array {
- if iterator(data, index) {
- return data
- }
- }
- return nil
-}
-
-// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
-func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
- var result = make([]interface{}, 0)
- for index, data := range array {
- if iterator(data, index) {
- result = append(result, data)
- }
- }
- return result
-}
-
-// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
-func Count(array []interface{}, iterator ConditionIterator) int {
- count := 0
- for index, data := range array {
- if iterator(data, index) {
- count = count + 1
- }
- }
- return count
-}
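The deleted arrays.go helpers operate on `[]interface{}` and therefore lose type safety. With type parameters, equivalents of `Map`, `Filter`, and `Reduce` take only a few lines; here is a minimal generic sketch (these declarations are illustrative stand-ins, not part of govalidator):

```go
package main

import "fmt"

// Generic stand-ins for the deleted helpers; the names mirror
// arrays.go above, but these are not drop-in replacements.
func Map[T, R any](in []T, f func(T, int) R) []R {
	out := make([]R, len(in))
	for i, v := range in {
		out[i] = f(v, i)
	}
	return out
}

func Filter[T any](in []T, pred func(T, int) bool) []T {
	var out []T
	for i, v := range in {
		if pred(v, i) {
			out = append(out, v)
		}
	}
	return out
}

func Reduce[T, A any](in []T, f func(A, T) A, acc A) A {
	for _, v := range in {
		acc = f(acc, v)
	}
	return acc
}

func main() {
	data := []int{1, 2, 3, 4, 5}
	fmt.Println(Map(data, func(v, _ int) int { return v * 3 }))        // [3 6 9 12 15]
	fmt.Println(Filter(data, func(v, _ int) bool { return v%2 == 0 })) // [2 4]
	fmt.Println(Reduce(data, func(a, v int) int { return a + v }, 0))  // 15
}
```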
diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go
deleted file mode 100644
index d68e990fc..000000000
--- a/vendor/github.com/asaskevich/govalidator/converter.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package govalidator
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// ToString converts the input to a string.
-func ToString(obj interface{}) string {
- res := fmt.Sprintf("%v", obj)
- return res
-}
-
-// ToJSON converts the input to a valid JSON string
-func ToJSON(obj interface{}) (string, error) {
- res, err := json.Marshal(obj)
- if err != nil {
- res = []byte("")
- }
- return string(res), err
-}
-
-// ToFloat converts the input to a float64, or 0.0 if the input is not convertible to a float.
-func ToFloat(value interface{}) (res float64, err error) {
- val := reflect.ValueOf(value)
-
- switch value.(type) {
- case int, int8, int16, int32, int64:
- res = float64(val.Int())
- case uint, uint8, uint16, uint32, uint64:
- res = float64(val.Uint())
- case float32, float64:
- res = val.Float()
- case string:
- res, err = strconv.ParseFloat(val.String(), 64)
- if err != nil {
- res = 0
- }
- default:
-		err = fmt.Errorf("ToFloat: unknown interface type %T", value)
- res = 0
- }
-
- return
-}
-
-// ToInt converts the input string or any numeric type to an int64, or 0 if the input is not an integer.
-func ToInt(value interface{}) (res int64, err error) {
- val := reflect.ValueOf(value)
-
- switch value.(type) {
- case int, int8, int16, int32, int64:
- res = val.Int()
- case uint, uint8, uint16, uint32, uint64:
- res = int64(val.Uint())
- case float32, float64:
- res = int64(val.Float())
- case string:
- if IsInt(val.String()) {
- res, err = strconv.ParseInt(val.String(), 0, 64)
- if err != nil {
- res = 0
- }
- } else {
-			err = fmt.Errorf("ToInt: invalid numeric format %v", value)
- res = 0
- }
- default:
- err = fmt.Errorf("ToInt: unknown interface type %T", value)
- res = 0
- }
-
- return
-}
-
-// ToBoolean convert the input string to a boolean.
-func ToBoolean(str string) (bool, error) {
- return strconv.ParseBool(str)
-}
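The converters in the removed converter.go unwrap numeric kinds via reflection and fall back to `strconv` for strings, returning the zero value alongside an error on failure. A small usage sketch, assuming the upstream `github.com/asaskevich/govalidator` package is still importable:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Numeric inputs are unwrapped via reflection; strings are parsed.
	n, err := govalidator.ToInt("42")
	fmt.Println(n, err) // 42 <nil>

	f, err := govalidator.ToFloat("3.5")
	fmt.Println(f, err) // 3.5 <nil>

	// A non-numeric string yields the zero value plus an error.
	n, err = govalidator.ToInt("forty-two")
	fmt.Println(n, err != nil) // 0 true
}
```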
diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go
deleted file mode 100644
index 55dce62dc..000000000
--- a/vendor/github.com/asaskevich/govalidator/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package govalidator
-
-// A package of validators and sanitizers for strings, structures and collections.
diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go
deleted file mode 100644
index 1da2336f4..000000000
--- a/vendor/github.com/asaskevich/govalidator/error.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package govalidator
-
-import (
- "sort"
- "strings"
-)
-
-// Errors is an array of multiple errors and conforms to the error interface.
-type Errors []error
-
-// Errors returns itself.
-func (es Errors) Errors() []error {
- return es
-}
-
-func (es Errors) Error() string {
- var errs []string
- for _, e := range es {
- errs = append(errs, e.Error())
- }
- sort.Strings(errs)
- return strings.Join(errs, ";")
-}
-
-// Error encapsulates a name, an error and whether there's a custom error message or not.
-type Error struct {
- Name string
- Err error
- CustomErrorMessageExists bool
-
- // Validator indicates the name of the validator that failed
- Validator string
- Path []string
-}
-
-func (e Error) Error() string {
- if e.CustomErrorMessageExists {
- return e.Err.Error()
- }
-
- errName := e.Name
- if len(e.Path) > 0 {
- errName = strings.Join(append(e.Path, e.Name), ".")
- }
-
- return errName + ": " + e.Err.Error()
-}
diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go
deleted file mode 100644
index 5041d9e86..000000000
--- a/vendor/github.com/asaskevich/govalidator/numerics.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package govalidator
-
-import (
- "math"
-)
-
-// Abs returns absolute value of number
-func Abs(value float64) float64 {
- return math.Abs(value)
-}
-
-// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
-func Sign(value float64) float64 {
- if value > 0 {
- return 1
- } else if value < 0 {
- return -1
- } else {
- return 0
- }
-}
-
-// IsNegative returns true if value < 0
-func IsNegative(value float64) bool {
- return value < 0
-}
-
-// IsPositive returns true if value > 0
-func IsPositive(value float64) bool {
- return value > 0
-}
-
-// IsNonNegative returns true if value >= 0
-func IsNonNegative(value float64) bool {
- return value >= 0
-}
-
-// IsNonPositive returns true if value <= 0
-func IsNonPositive(value float64) bool {
- return value <= 0
-}
-
-// InRangeInt returns true if value lies between left and right border
-func InRangeInt(value, left, right interface{}) bool {
- value64, _ := ToInt(value)
- left64, _ := ToInt(left)
- right64, _ := ToInt(right)
- if left64 > right64 {
- left64, right64 = right64, left64
- }
- return value64 >= left64 && value64 <= right64
-}
-
-// InRangeFloat32 returns true if value lies between left and right border
-func InRangeFloat32(value, left, right float32) bool {
- if left > right {
- left, right = right, left
- }
- return value >= left && value <= right
-}
-
-// InRangeFloat64 returns true if value lies between left and right border
-func InRangeFloat64(value, left, right float64) bool {
- if left > right {
- left, right = right, left
- }
- return value >= left && value <= right
-}
-
-// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string.
-// All values must be of the same type.
-// Returns false if the value doesn't lie in the range, or if the values are incompatible or not comparable.
-func InRange(value interface{}, left interface{}, right interface{}) bool {
- switch value.(type) {
- case int:
- intValue, _ := ToInt(value)
- intLeft, _ := ToInt(left)
- intRight, _ := ToInt(right)
- return InRangeInt(intValue, intLeft, intRight)
- case float32, float64:
- intValue, _ := ToFloat(value)
- intLeft, _ := ToFloat(left)
- intRight, _ := ToFloat(right)
- return InRangeFloat64(intValue, intLeft, intRight)
- case string:
- return value.(string) >= left.(string) && value.(string) <= right.(string)
- default:
- return false
- }
-}
-
-// IsWhole returns true if value is whole number
-func IsWhole(value float64) bool {
- return math.Remainder(value, 1) == 0
-}
-
-// IsNatural returns true if value is natural number (positive and whole)
-func IsNatural(value float64) bool {
- return IsWhole(value) && IsPositive(value)
-}
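`InRange` in the removed numerics.go dispatches on the dynamic type of its first argument, and the typed variants silently swap reversed borders, so callers never need to order min and max themselves. A short sketch of that behavior, again assuming the upstream package:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Reversed borders are swapped internally, so both calls agree.
	fmt.Println(govalidator.InRangeInt(5, 1, 10)) // true
	fmt.Println(govalidator.InRangeInt(5, 10, 1)) // true

	// Dispatch follows the dynamic type of the first argument.
	fmt.Println(govalidator.InRange(2.5, 1.0, 3.0)) // true
	fmt.Println(govalidator.InRange("b", "a", "c")) // true

	// Mismatched borders don't panic for floats: ToFloat("a") fails,
	// both borders fall back to 0, and the result is simply false.
	fmt.Println(govalidator.InRange(2.5, "a", "c")) // false
}
```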
diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go
deleted file mode 100644
index bafc3765e..000000000
--- a/vendor/github.com/asaskevich/govalidator/patterns.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package govalidator
-
-import "regexp"
-
-// Basic regular expressions for validating strings
-const (
- Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
- CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
- ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
- ISBN13 string = "^(?:[0-9]{13})$"
- UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
- UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
- UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
- UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
- Alpha string = "^[a-zA-Z]+$"
- Alphanumeric string = "^[a-zA-Z0-9]+$"
- Numeric string = "^[0-9]+$"
- Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
- Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
- Hexadecimal string = "^[0-9a-fA-F]+$"
- Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
- RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
- ASCII string = "^[\x00-\x7F]+$"
- Multibyte string = "[^\x00-\x7F]"
- FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
- HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
- Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
- PrintableASCII string = "^[\x20-\x7E]+$"
- DataURI string = "^data:.+\\/(.+);base64$"
- MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$"
- Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
- Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
- DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
- IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
- URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
- URLUsername string = `(\S+(:\S*)?@)`
- URLPath string = `((\/|\?|#)[^\s]*)`
- URLPort string = `(:(\d{1,5}))`
- URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
- URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
- URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
- SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
- WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
- UnixPath string = `^(/[^/\x00]*)+/?$`
- WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
- UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$`
- Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
- tagName string = "valid"
- hasLowerCase string = ".*[[:lower:]]"
- hasUpperCase string = ".*[[:upper:]]"
- hasWhitespace string = ".*[[:space:]]"
- hasWhitespaceOnly string = "^[[:space:]]+$"
- IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
- IMSI string = "^\\d{14,15}$"
- E164 string = `^\+?[1-9]\d{1,14}$`
-)
-
-// Used by IsFilePath func
-const (
- // Unknown is unresolved OS type
- Unknown = iota
- // Win is Windows type
- Win
- // Unix is *nix OS types
- Unix
-)
-
-var (
- userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
- hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
- userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
- rxEmail = regexp.MustCompile(Email)
- rxCreditCard = regexp.MustCompile(CreditCard)
- rxISBN10 = regexp.MustCompile(ISBN10)
- rxISBN13 = regexp.MustCompile(ISBN13)
- rxUUID3 = regexp.MustCompile(UUID3)
- rxUUID4 = regexp.MustCompile(UUID4)
- rxUUID5 = regexp.MustCompile(UUID5)
- rxUUID = regexp.MustCompile(UUID)
- rxAlpha = regexp.MustCompile(Alpha)
- rxAlphanumeric = regexp.MustCompile(Alphanumeric)
- rxNumeric = regexp.MustCompile(Numeric)
- rxInt = regexp.MustCompile(Int)
- rxFloat = regexp.MustCompile(Float)
- rxHexadecimal = regexp.MustCompile(Hexadecimal)
- rxHexcolor = regexp.MustCompile(Hexcolor)
- rxRGBcolor = regexp.MustCompile(RGBcolor)
- rxASCII = regexp.MustCompile(ASCII)
- rxPrintableASCII = regexp.MustCompile(PrintableASCII)
- rxMultibyte = regexp.MustCompile(Multibyte)
- rxFullWidth = regexp.MustCompile(FullWidth)
- rxHalfWidth = regexp.MustCompile(HalfWidth)
- rxBase64 = regexp.MustCompile(Base64)
- rxDataURI = regexp.MustCompile(DataURI)
- rxMagnetURI = regexp.MustCompile(MagnetURI)
- rxLatitude = regexp.MustCompile(Latitude)
- rxLongitude = regexp.MustCompile(Longitude)
- rxDNSName = regexp.MustCompile(DNSName)
- rxURL = regexp.MustCompile(URL)
- rxSSN = regexp.MustCompile(SSN)
- rxWinPath = regexp.MustCompile(WinPath)
- rxUnixPath = regexp.MustCompile(UnixPath)
- rxARWinPath = regexp.MustCompile(WinARPath)
- rxARUnixPath = regexp.MustCompile(UnixARPath)
- rxSemver = regexp.MustCompile(Semver)
- rxHasLowerCase = regexp.MustCompile(hasLowerCase)
- rxHasUpperCase = regexp.MustCompile(hasUpperCase)
- rxHasWhitespace = regexp.MustCompile(hasWhitespace)
- rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
- rxIMEI = regexp.MustCompile(IMEI)
- rxIMSI = regexp.MustCompile(IMSI)
- rxE164 = regexp.MustCompile(E164)
-)
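patterns.go exports each regex source as a string constant and compiles a private copy for the package's own validators, so consumers can recompile any pattern for custom matching. A quick sketch using the exported `Semver` source, assuming the upstream package:

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Recompile the exported pattern source for standalone use.
	rx := regexp.MustCompile(govalidator.Semver)
	fmt.Println(rx.MatchString("v1.2.3-rc.1+build.5")) // true
	fmt.Println(rx.MatchString("1.2"))                 // false: patch version missing
}
```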
diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go
deleted file mode 100644
index c573abb51..000000000
--- a/vendor/github.com/asaskevich/govalidator/types.go
+++ /dev/null
@@ -1,656 +0,0 @@
-package govalidator
-
-import (
- "reflect"
- "regexp"
- "sort"
- "sync"
-)
-
-// Validator is a wrapper for a validator function that returns bool and accepts string.
-type Validator func(str string) bool
-
-// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
-// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
-type CustomTypeValidator func(i interface{}, o interface{}) bool
-
-// ParamValidator is a wrapper for validator functions that accept additional parameters.
-type ParamValidator func(str string, params ...string) bool
-
-// InterfaceParamValidator is a wrapper for functions that accept variadic parameters for an interface value
-type InterfaceParamValidator func(in interface{}, params ...string) bool
-type tagOptionsMap map[string]tagOption
-
-func (t tagOptionsMap) orderedKeys() []string {
- var keys []string
- for k := range t {
- keys = append(keys, k)
- }
-
- sort.Slice(keys, func(a, b int) bool {
- return t[keys[a]].order < t[keys[b]].order
- })
-
- return keys
-}
-
-type tagOption struct {
- name string
- customErrorMessage string
- order int
-}
-
-// UnsupportedTypeError is a wrapper for reflect.Type
-type UnsupportedTypeError struct {
- Type reflect.Type
-}
-
-// stringValues is a slice of reflect.Value holding *reflect.StringValue.
-// It implements the methods to sort by string.
-type stringValues []reflect.Value
-
-// InterfaceParamTagMap is a map of functions that accept variadic parameters for an interface value
-var InterfaceParamTagMap = map[string]InterfaceParamValidator{
- "type": IsType,
-}
-
-// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
-var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
- "type": regexp.MustCompile(`^type\((.*)\)$`),
-}
-
-// ParamTagMap is a map of functions that accept variadic parameters
-var ParamTagMap = map[string]ParamValidator{
- "length": ByteLength,
- "range": Range,
- "runelength": RuneLength,
- "stringlength": StringLength,
- "matches": StringMatches,
- "in": IsInRaw,
- "rsapub": IsRsaPub,
- "minstringlength": MinStringLength,
- "maxstringlength": MaxStringLength,
-}
-
-// ParamTagRegexMap maps param tags to their respective regexes.
-var ParamTagRegexMap = map[string]*regexp.Regexp{
- "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
- "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
- "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
- "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
- "in": regexp.MustCompile(`^in\((.*)\)`),
- "matches": regexp.MustCompile(`^matches\((.+)\)$`),
- "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
- "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
- "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
-}
-
-type customTypeTagMap struct {
- validators map[string]CustomTypeValidator
-
- sync.RWMutex
-}
-
-func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
- tm.RLock()
- defer tm.RUnlock()
- v, ok := tm.validators[name]
- return v, ok
-}
-
-func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
- tm.Lock()
- defer tm.Unlock()
- tm.validators[name] = ctv
-}
-
-// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
-// Use this to validate compound or custom types that need to be handled as a whole, e.g.
-// `type UUID [16]byte` (this would be handled as an array of bytes).
-var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
-
-// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
-var TagMap = map[string]Validator{
- "email": IsEmail,
- "url": IsURL,
- "dialstring": IsDialString,
- "requrl": IsRequestURL,
- "requri": IsRequestURI,
- "alpha": IsAlpha,
- "utfletter": IsUTFLetter,
- "alphanum": IsAlphanumeric,
- "utfletternum": IsUTFLetterNumeric,
- "numeric": IsNumeric,
- "utfnumeric": IsUTFNumeric,
- "utfdigit": IsUTFDigit,
- "hexadecimal": IsHexadecimal,
- "hexcolor": IsHexcolor,
- "rgbcolor": IsRGBcolor,
- "lowercase": IsLowerCase,
- "uppercase": IsUpperCase,
- "int": IsInt,
- "float": IsFloat,
- "null": IsNull,
- "notnull": IsNotNull,
- "uuid": IsUUID,
- "uuidv3": IsUUIDv3,
- "uuidv4": IsUUIDv4,
- "uuidv5": IsUUIDv5,
- "creditcard": IsCreditCard,
- "isbn10": IsISBN10,
- "isbn13": IsISBN13,
- "json": IsJSON,
- "multibyte": IsMultibyte,
- "ascii": IsASCII,
- "printableascii": IsPrintableASCII,
- "fullwidth": IsFullWidth,
- "halfwidth": IsHalfWidth,
- "variablewidth": IsVariableWidth,
- "base64": IsBase64,
- "datauri": IsDataURI,
- "ip": IsIP,
- "port": IsPort,
- "ipv4": IsIPv4,
- "ipv6": IsIPv6,
- "dns": IsDNSName,
- "host": IsHost,
- "mac": IsMAC,
- "latitude": IsLatitude,
- "longitude": IsLongitude,
- "ssn": IsSSN,
- "semver": IsSemver,
- "rfc3339": IsRFC3339,
- "rfc3339WithoutZone": IsRFC3339WithoutZone,
- "ISO3166Alpha2": IsISO3166Alpha2,
- "ISO3166Alpha3": IsISO3166Alpha3,
- "ISO4217": IsISO4217,
- "IMEI": IsIMEI,
- "ulid": IsULID,
-}
-
-// ISO3166Entry stores country codes
-type ISO3166Entry struct {
- EnglishShortName string
- FrenchShortName string
- Alpha2Code string
- Alpha3Code string
- Numeric string
-}
-
-// ISO3166List is based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
-var ISO3166List = []ISO3166Entry{
- {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
- {"Albania", "Albanie (l')", "AL", "ALB", "008"},
- {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
- {"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
- {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
- {"Andorra", "Andorre (l')", "AD", "AND", "020"},
- {"Angola", "Angola (l')", "AO", "AGO", "024"},
- {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
- {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
- {"Argentina", "Argentine (l')", "AR", "ARG", "032"},
- {"Australia", "Australie (l')", "AU", "AUS", "036"},
- {"Austria", "Autriche (l')", "AT", "AUT", "040"},
- {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
- {"Bahrain", "Bahreïn", "BH", "BHR", "048"},
- {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
- {"Armenia", "Arménie (l')", "AM", "ARM", "051"},
- {"Barbados", "Barbade (la)", "BB", "BRB", "052"},
- {"Belgium", "Belgique (la)", "BE", "BEL", "056"},
- {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
- {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
- {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
- {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
- {"Botswana", "Botswana (le)", "BW", "BWA", "072"},
- {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
- {"Brazil", "Brésil (le)", "BR", "BRA", "076"},
- {"Belize", "Belize (le)", "BZ", "BLZ", "084"},
- {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
- {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
- {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
- {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
- {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
- {"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
- {"Burundi", "Burundi (le)", "BI", "BDI", "108"},
- {"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
- {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
- {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
- {"Canada", "Canada (le)", "CA", "CAN", "124"},
- {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
- {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
- {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
- {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
- {"Chad", "Tchad (le)", "TD", "TCD", "148"},
- {"Chile", "Chili (le)", "CL", "CHL", "152"},
- {"China", "Chine (la)", "CN", "CHN", "156"},
- {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
- {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
- {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
- {"Colombia", "Colombie (la)", "CO", "COL", "170"},
- {"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
- {"Mayotte", "Mayotte", "YT", "MYT", "175"},
- {"Congo (the)", "Congo (le)", "CG", "COG", "178"},
- {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
- {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
- {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
- {"Croatia", "Croatie (la)", "HR", "HRV", "191"},
- {"Cuba", "Cuba", "CU", "CUB", "192"},
- {"Cyprus", "Chypre", "CY", "CYP", "196"},
- {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
- {"Benin", "Bénin (le)", "BJ", "BEN", "204"},
- {"Denmark", "Danemark (le)", "DK", "DNK", "208"},
- {"Dominica", "Dominique (la)", "DM", "DMA", "212"},
- {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
- {"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
- {"El Salvador", "El Salvador", "SV", "SLV", "222"},
- {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
- {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
- {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
- {"Estonia", "Estonie (l')", "EE", "EST", "233"},
- {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
- {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
- {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
- {"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
- {"Finland", "Finlande (la)", "FI", "FIN", "246"},
- {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
- {"France", "France (la)", "FR", "FRA", "250"},
- {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
- {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
- {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
- {"Djibouti", "Djibouti", "DJ", "DJI", "262"},
- {"Gabon", "Gabon (le)", "GA", "GAB", "266"},
- {"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
- {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
- {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
- {"Germany", "Allemagne (l')", "DE", "DEU", "276"},
- {"Ghana", "Ghana (le)", "GH", "GHA", "288"},
- {"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
- {"Kiribati", "Kiribati", "KI", "KIR", "296"},
- {"Greece", "Grèce (la)", "GR", "GRC", "300"},
- {"Greenland", "Groenland (le)", "GL", "GRL", "304"},
- {"Grenada", "Grenade (la)", "GD", "GRD", "308"},
- {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
- {"Guam", "Guam", "GU", "GUM", "316"},
- {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
- {"Guinea", "Guinée (la)", "GN", "GIN", "324"},
- {"Guyana", "Guyana (le)", "GY", "GUY", "328"},
- {"Haiti", "Haïti", "HT", "HTI", "332"},
- {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
- {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
- {"Honduras", "Honduras (le)", "HN", "HND", "340"},
- {"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
- {"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
- {"Iceland", "Islande (l')", "IS", "ISL", "352"},
- {"India", "Inde (l')", "IN", "IND", "356"},
- {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
- {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
- {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
- {"Ireland", "Irlande (l')", "IE", "IRL", "372"},
- {"Israel", "Israël", "IL", "ISR", "376"},
- {"Italy", "Italie (l')", "IT", "ITA", "380"},
- {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
- {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
- {"Japan", "Japon (le)", "JP", "JPN", "392"},
- {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
- {"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
- {"Kenya", "Kenya (le)", "KE", "KEN", "404"},
- {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
- {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
- {"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
- {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
- {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
- {"Lebanon", "Liban (le)", "LB", "LBN", "422"},
- {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
- {"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
- {"Liberia", "Libéria (le)", "LR", "LBR", "430"},
- {"Libya", "Libye (la)", "LY", "LBY", "434"},
- {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
- {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
- {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
- {"Macao", "Macao", "MO", "MAC", "446"},
- {"Madagascar", "Madagascar", "MG", "MDG", "450"},
- {"Malawi", "Malawi (le)", "MW", "MWI", "454"},
- {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
- {"Maldives", "Maldives (les)", "MV", "MDV", "462"},
- {"Mali", "Mali (le)", "ML", "MLI", "466"},
- {"Malta", "Malte", "MT", "MLT", "470"},
- {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
- {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
- {"Mauritius", "Maurice", "MU", "MUS", "480"},
- {"Mexico", "Mexique (le)", "MX", "MEX", "484"},
- {"Monaco", "Monaco", "MC", "MCO", "492"},
- {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
- {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
- {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
- {"Montserrat", "Montserrat", "MS", "MSR", "500"},
- {"Morocco", "Maroc (le)", "MA", "MAR", "504"},
- {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
- {"Oman", "Oman", "OM", "OMN", "512"},
- {"Namibia", "Namibie (la)", "NA", "NAM", "516"},
- {"Nauru", "Nauru", "NR", "NRU", "520"},
- {"Nepal", "Népal (le)", "NP", "NPL", "524"},
- {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
- {"Curaçao", "Curaçao", "CW", "CUW", "531"},
- {"Aruba", "Aruba", "AW", "ABW", "533"},
- {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
- {"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
- {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
- {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
- {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
- {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
- {"Niger (the)", "Niger (le)", "NE", "NER", "562"},
- {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
- {"Niue", "Niue", "NU", "NIU", "570"},
- {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
- {"Norway", "Norvège (la)", "NO", "NOR", "578"},
- {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
- {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
- {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
- {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
- {"Palau", "Palaos (les)", "PW", "PLW", "585"},
- {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
- {"Panama", "Panama (le)", "PA", "PAN", "591"},
- {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
- {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
- {"Peru", "Pérou (le)", "PE", "PER", "604"},
- {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
- {"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
- {"Poland", "Pologne (la)", "PL", "POL", "616"},
- {"Portugal", "Portugal (le)", "PT", "PRT", "620"},
- {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
- {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
- {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
- {"Qatar", "Qatar (le)", "QA", "QAT", "634"},
- {"Réunion", "Réunion (La)", "RE", "REU", "638"},
- {"Romania", "Roumanie (la)", "RO", "ROU", "642"},
- {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
- {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
- {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
- {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
- {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
- {"Anguilla", "Anguilla", "AI", "AIA", "660"},
- {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
- {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
- {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
- {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
- {"San Marino", "Saint-Marin", "SM", "SMR", "674"},
- {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
- {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
- {"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
- {"Serbia", "Serbie (la)", "RS", "SRB", "688"},
- {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
- {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
- {"Singapore", "Singapour", "SG", "SGP", "702"},
- {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
- {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
- {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
- {"Somalia", "Somalie (la)", "SO", "SOM", "706"},
- {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
- {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
- {"Spain", "Espagne (l')", "ES", "ESP", "724"},
- {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
- {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
- {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
- {"Suriname", "Suriname (le)", "SR", "SUR", "740"},
- {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
- {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
- {"Sweden", "Suède (la)", "SE", "SWE", "752"},
- {"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
- {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
- {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
- {"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
- {"Togo", "Togo (le)", "TG", "TGO", "768"},
- {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
- {"Tonga", "Tonga (les)", "TO", "TON", "776"},
- {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
- {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
- {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
- {"Turkey", "Turquie (la)", "TR", "TUR", "792"},
- {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
- {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
- {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
- {"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
- {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
- {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
- {"Egypt", "Égypte (l')", "EG", "EGY", "818"},
- {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
- {"Guernsey", "Guernesey", "GG", "GGY", "831"},
- {"Jersey", "Jersey", "JE", "JEY", "832"},
- {"Isle of Man", "Île de Man", "IM", "IMN", "833"},
- {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
- {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
- {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
- {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
- {"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
- {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
- {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
- {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
- {"Samoa", "Samoa (le)", "WS", "WSM", "882"},
- {"Yemen", "Yémen (le)", "YE", "YEM", "887"},
- {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
-}
-
-// ISO4217List is the list of ISO currency codes
-var ISO4217List = []string{
- "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
- "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
- "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
- "DJF", "DKK", "DOP", "DZD",
- "EGP", "ERN", "ETB", "EUR",
- "FJD", "FKP",
- "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
- "HKD", "HNL", "HRK", "HTG", "HUF",
- "IDR", "ILS", "INR", "IQD", "IRR", "ISK",
- "JMD", "JOD", "JPY",
- "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
- "LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
- "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
- "NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
- "OMR",
- "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
- "QAR",
- "RON", "RSD", "RUB", "RWF",
- "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL",
- "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
- "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS",
- "VEF", "VES", "VND", "VUV",
- "WST",
- "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
- "YER",
- "ZAR", "ZMW", "ZWL",
-}
-
-// ISO693Entry stores ISO language codes
-type ISO693Entry struct {
- Alpha3bCode string
- Alpha2Code string
- English string
-}
-
-// ISO693List is based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
-var ISO693List = []ISO693Entry{
- {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
- {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
- {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
- {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
- {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
- {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
- {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
- {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
- {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
- {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
- {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
- {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
- {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
- {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
- {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
- {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
- {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
- {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
- {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
- {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
- {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
- {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
- {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
- {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
- {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
- {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
- {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
- {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
- {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
- {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
- {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
- {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
- {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
- {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
- {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
- {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
- {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
- {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
- {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
- {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
- {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
- {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
- {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
- {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
- {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
- {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
- {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
- {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
- {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
- {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
- {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
- {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
- {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
- {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
- {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
- {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
- {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
- {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
- {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
- {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
- {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
- {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
- {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
- {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
- {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
- {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
- {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
- {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
- {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
- {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
- {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
- {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
- {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
- {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
- {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
- {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
- {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
- {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
- {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
- {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
- {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
- {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
- {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
- {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
- {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
- {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
- {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
- {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
- {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
- {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
- {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
- {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
- {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
- {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
- {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
- {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
- {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
- {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
- {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
- {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
- {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
- {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
- {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
- {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
- {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
- {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
- {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
- {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
- {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
- {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
- {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
- {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
- {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
- {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
- {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
- {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
- {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
- {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
- {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
- {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
- {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
- {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
- {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
- {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
- {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
- {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
- {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
- {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
- {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
- {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
- {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
- {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
- {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
- {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
- {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
- {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
- {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
- {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
- {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
- {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
- {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
- {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
- {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
- {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
- {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
- {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
- {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
- {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
- {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
- {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
- {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
- {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
- {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
- {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
- {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
- {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
- {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
- {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
- {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
- {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
- {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
- {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
- {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
- {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
- {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
- {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
- {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
- {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
- {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
- {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
- {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
- {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
- {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
- {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
- {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
- {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
- {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
- {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
- {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
- {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
- {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
- {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
- {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
- {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
-}
diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go
deleted file mode 100644
index f4c30f824..000000000
--- a/vendor/github.com/asaskevich/govalidator/utils.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package govalidator
-
-import (
- "errors"
- "fmt"
- "html"
- "math"
- "path"
- "regexp"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// Contains checks if the string contains the substring.
-func Contains(str, substring string) bool {
- return strings.Contains(str, substring)
-}
-
-// Matches checks if the string matches the pattern (the pattern is a regular expression).
-// In case of error it returns false.
-func Matches(str, pattern string) bool {
- match, _ := regexp.MatchString(pattern, str)
- return match
-}
-
-// LeftTrim trims characters from the left side of the input.
-// If the second argument is empty, it removes leading spaces.
-func LeftTrim(str, chars string) string {
- if chars == "" {
- return strings.TrimLeftFunc(str, unicode.IsSpace)
- }
- r, _ := regexp.Compile("^[" + chars + "]+")
- return r.ReplaceAllString(str, "")
-}
-
-// RightTrim trims characters from the right side of the input.
-// If the second argument is empty, it removes trailing spaces.
-func RightTrim(str, chars string) string {
- if chars == "" {
- return strings.TrimRightFunc(str, unicode.IsSpace)
- }
- r, _ := regexp.Compile("[" + chars + "]+$")
- return r.ReplaceAllString(str, "")
-}
-
-// Trim trims characters from both sides of the input.
-// If the second argument is empty, it removes surrounding spaces.
-func Trim(str, chars string) string {
- return LeftTrim(RightTrim(str, chars), chars)
-}
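Reviewer note: the trim helpers removed here splice chars directly into a regex character class ("[" + chars + "]"), so ranges like "0-9" are meaningful and regex metacharacters must be escaped by the caller. A small usage sketch, with expected outputs in comments assuming the implementation above:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Empty chars falls back to unicode.IsSpace trimming.
	fmt.Println(govalidator.Trim("  hello  ", ""))    // "hello"
	fmt.Println(govalidator.LeftTrim("xxhixx", "x"))  // "hixx"
	fmt.Println(govalidator.RightTrim("xxhixx", "x")) // "xxhi"

	// chars lands inside a character class, so "0-9" is a digit range here.
	fmt.Println(govalidator.Trim("0099hi99", "0-9"))    // "hi"
	fmt.Println(govalidator.WhiteList("a1b2c3", "a-z")) // "abc"
	fmt.Println(govalidator.BlackList("a1b2c3", "a-z")) // "123"
}
```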
-
-// WhiteList removes characters that do not appear in the whitelist.
-func WhiteList(str, chars string) string {
- pattern := "[^" + chars + "]+"
- r, _ := regexp.Compile(pattern)
- return r.ReplaceAllString(str, "")
-}
-
-// BlackList removes characters that appear in the blacklist.
-func BlackList(str, chars string) string {
- pattern := "[" + chars + "]+"
- r, _ := regexp.Compile(pattern)
- return r.ReplaceAllString(str, "")
-}
-
-// StripLow removes characters with a numerical value < 32, and the character 127, mostly control characters.
-// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
-func StripLow(str string, keepNewLines bool) string {
- chars := ""
- if keepNewLines {
- chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
- } else {
- chars = "\x00-\x1F\x7F"
- }
- return BlackList(str, chars)
-}
-
-// ReplacePattern replaces all matches of the regular expression pattern in str with replace.
-func ReplacePattern(str, pattern, replace string) string {
- r, _ := regexp.Compile(pattern)
- return r.ReplaceAllString(str, replace)
-}
-
-// Escape replaces <, >, & and " with HTML entities.
-var Escape = html.EscapeString
-
-func addSegment(inrune, segment []rune) []rune {
- if len(segment) == 0 {
- return inrune
- }
- if len(inrune) != 0 {
- inrune = append(inrune, '_')
- }
- inrune = append(inrune, segment...)
- return inrune
-}
-
-// UnderscoreToCamelCase converts from underscore separated form to camel case form.
-// Ex.: my_func => MyFunc
-func UnderscoreToCamelCase(s string) string {
- return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
-}
-
-// CamelCaseToUnderscore converts from camel case form to underscore separated form.
-// Ex.: MyFunc => my_func
-func CamelCaseToUnderscore(str string) string {
- var output []rune
- var segment []rune
- for _, r := range str {
-
-		// do not treat numbers as a separate segment
- if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
- output = addSegment(output, segment)
- segment = nil
- }
- segment = append(segment, unicode.ToLower(r))
- }
- output = addSegment(output, segment)
- return string(output)
-}
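Reviewer note: the case converters above start a new segment on every non-lowercase rune but deliberately let digits ride along with the current segment. A sketch of the expected behavior:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.UnderscoreToCamelCase("my_func")) // "MyFunc"
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc"))  // "my_func"
	// Each uppercase rune opens a segment, and digits do not.
	fmt.Println(govalidator.CamelCaseToUnderscore("HTTPServer2")) // "h_t_t_p_server2"
}
```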
-
-// Reverse returns the reversed string.
-func Reverse(s string) string {
- r := []rune(s)
- for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
- r[i], r[j] = r[j], r[i]
- }
- return string(r)
-}
-
-// GetLines splits the string by "\n" and returns the array of lines.
-func GetLines(s string) []string {
- return strings.Split(s, "\n")
-}
-
-// GetLine returns the specified line of a multiline string.
-func GetLine(s string, index int) (string, error) {
- lines := GetLines(s)
- if index < 0 || index >= len(lines) {
- return "", errors.New("line index out of bounds")
- }
- return lines[index], nil
-}
-
-// RemoveTags removes all tags from an HTML string.
-func RemoveTags(s string) string {
- return ReplacePattern(s, "<[^>]*>", "")
-}
-
-// SafeFileName returns a safe string that can be used in file names.
-func SafeFileName(str string) string {
- name := strings.ToLower(str)
- name = path.Clean(path.Base(name))
- name = strings.Trim(name, " ")
- separators, err := regexp.Compile(`[ &_=+:]`)
- if err == nil {
- name = separators.ReplaceAllString(name, "-")
- }
- legal, err := regexp.Compile(`[^[:alnum:]-.]`)
- if err == nil {
- name = legal.ReplaceAllString(name, "")
- }
- for strings.Contains(name, "--") {
- name = strings.Replace(name, "--", "-", -1)
- }
- return name
-}
-
-// NormalizeEmail canonicalizes an email address.
-// The local part of the email address is lowercased for all domains, and the hostname is always lowercased.
-// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part,
-// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com), and all @googlemail.com addresses are
-// normalized to @gmail.com.
-func NormalizeEmail(str string) (string, error) {
- if !IsEmail(str) {
- return "", fmt.Errorf("%s is not an email", str)
- }
- parts := strings.Split(str, "@")
- parts[0] = strings.ToLower(parts[0])
- parts[1] = strings.ToLower(parts[1])
- if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
- parts[1] = "gmail.com"
- parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
- }
- return strings.Join(parts, "@"), nil
-}
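Reviewer note: a short sketch of the normalization rules documented above. The inputs are kept lowercase, since IsEmail's uppercase support is flagged as a TODO elsewhere in this file:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// GMail: dots removed, "+tag" stripped, googlemail.com folded into gmail.com.
	e, err := govalidator.NormalizeEmail("some.one+news@googlemail.com")
	fmt.Println(e, err) // expected: someone@gmail.com <nil>

	// Other domains: both parts are only lowercased.
	e, _ = govalidator.NormalizeEmail("jane.doe@example.org")
	fmt.Println(e) // jane.doe@example.org
}
```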
-
-// Truncate a string to the closest length without breaking words.
-func Truncate(str string, length int, ending string) string {
- var aftstr, befstr string
- if len(str) > length {
- words := strings.Fields(str)
- before, present := 0, 0
- for i := range words {
- befstr = aftstr
- before = present
- aftstr = aftstr + words[i] + " "
- present = len(aftstr)
- if present > length && i != 0 {
- if (length - before) < (present - length) {
- return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
- }
- return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
- }
- }
- }
-
- return str
-}
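Reviewer note: Truncate picks whichever word boundary lands closer to the requested length, trims stray punctuation, and appends the ending; strings at or under the limit pass through untouched. For example:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog"
	// The boundary after "jumps" (26 runes) is closer to 25 than the one before it (20).
	fmt.Println(govalidator.Truncate(s, 25, "...")) // expected: "The quick brown fox jumps..."
	// Within the limit: returned unchanged.
	fmt.Println(govalidator.Truncate("short", 25, "...")) // "short"
}
```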
-
-// PadLeft pads the left side of a string if its size is less than the indicated pad length.
-func PadLeft(str string, padStr string, padLen int) string {
- return buildPadStr(str, padStr, padLen, true, false)
-}
-
-// PadRight pads the right side of a string if its size is less than the indicated pad length.
-func PadRight(str string, padStr string, padLen int) string {
- return buildPadStr(str, padStr, padLen, false, true)
-}
-
-// PadBoth pads both sides of a string if its size is less than the indicated pad length.
-func PadBoth(str string, padStr string, padLen int) string {
- return buildPadStr(str, padStr, padLen, true, true)
-}
-
-// buildPadStr pads a string on the left, right, or both sides.
-// Note that the padding string can be unicode and more than one character.
-func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
-
-	// When the padded length is less than the current string size
- if padLen < utf8.RuneCountInString(str) {
- return str
- }
-
- padLen -= utf8.RuneCountInString(str)
-
- targetLen := padLen
-
- targetLenLeft := targetLen
- targetLenRight := targetLen
- if padLeft && padRight {
- targetLenLeft = padLen / 2
- targetLenRight = padLen - targetLenLeft
- }
-
- strToRepeatLen := utf8.RuneCountInString(padStr)
-
- repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
- repeatedString := strings.Repeat(padStr, repeatTimes)
-
- leftSide := ""
- if padLeft {
- leftSide = repeatedString[0:targetLenLeft]
- }
-
- rightSide := ""
- if padRight {
- rightSide = repeatedString[0:targetLenRight]
- }
-
- return leftSide + str + rightSide
-}
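Reviewer note: buildPadStr measures lengths in runes, so multibyte input pads correctly, and when padding both sides the left side receives the floor of the split. A sketch of the three exported wrappers:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.PadLeft("д", "*", 5))    // "****д" (rune-counted)
	fmt.Println(govalidator.PadRight("abc", ".", 6)) // "abc..."
	// 5 runes of padding: 2 go left, the remaining 3 go right.
	fmt.Println(govalidator.PadBoth("abc", "-", 8)) // "--abc---"
}
```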
-
-// TruncatingErrorf drops the extra args to fmt.Errorf that are not consumed by %s verbs in str.
-func TruncatingErrorf(str string, args ...interface{}) error {
- n := strings.Count(str, "%s")
- return fmt.Errorf(str, args[:n]...)
-}
diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go
deleted file mode 100644
index 46ecfc84a..000000000
--- a/vendor/github.com/asaskevich/govalidator/validator.go
+++ /dev/null
@@ -1,1769 +0,0 @@
-// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
-package govalidator
-
-import (
- "bytes"
- "crypto/rsa"
- "crypto/x509"
- "encoding/base64"
- "encoding/json"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "net"
- "net/url"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-var (
- fieldsRequiredByDefault bool
- nilPtrAllowedByRequired = false
- notNumberRegexp = regexp.MustCompile("[^0-9]+")
- whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`)
- paramsRegexp = regexp.MustCompile(`\(.*\)$`)
-)
-
-const maxURLRuneCount = 2083
-const minURLRuneCount = 3
-const rfc3339WithoutZone = "2006-01-02T15:04:05"
-
-// SetFieldsRequiredByDefault causes validation to fail when struct fields
-// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
-// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
-//     type exampleStruct struct {
-//         Name  string ``
-//         Email string `valid:"email"`
-//     }
-// This, however, will only fail when Email is empty or an invalid email address:
-//     type exampleStruct2 struct {
-//         Name  string `valid:"-"`
-//         Email string `valid:"email"`
-//     }
-// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
-//     type exampleStruct3 struct {
-//         Name  string `valid:"-"`
-//         Email string `valid:"email,optional"`
-//     }
-func SetFieldsRequiredByDefault(value bool) {
- fieldsRequiredByDefault = value
-}
-
-// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
-// The validation will still reject ptr fields in their zero value state. Example with this enabled:
-//     type exampleStruct struct {
-//         Name *string `valid:"required"`
-//     }
-// With `Name` set to "", this will be considered invalid input and will cause a validation error.
-// With `Name` set to nil, this will be considered valid by validation.
-// By default this is disabled.
-func SetNilPtrAllowedByRequired(value bool) {
- nilPtrAllowedByRequired = value
-}
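Reviewer note: a sketch of how these package-level switches alter ValidateStruct's treatment of untagged fields (ValidateStruct is defined later in this file; the signupForm type and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type signupForm struct {
	Nickname string // no valid tag
	Email    string `valid:"email"`
}

func main() {
	form := signupForm{Nickname: "gopher", Email: "gopher@example.com"}

	ok, err := govalidator.ValidateStruct(form)
	fmt.Println(ok, err) // expected: true <nil>, untagged fields are ignored by default

	// Now every field must carry a validation or be marked exempt.
	govalidator.SetFieldsRequiredByDefault(true)
	ok, err = govalidator.ValidateStruct(form)
	fmt.Println(ok, err) // expected: false, with an error about the untagged Nickname
}
```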
-
-// IsEmail checks if the string is an email.
-func IsEmail(str string) bool {
- // TODO uppercase letters are not supported
- return rxEmail.MatchString(str)
-}
-
-// IsExistingEmail checks if the string is an email of existing domain
-func IsExistingEmail(email string) bool {
-
- if len(email) < 6 || len(email) > 254 {
- return false
- }
- at := strings.LastIndex(email, "@")
- if at <= 0 || at > len(email)-3 {
- return false
- }
- user := email[:at]
- host := email[at+1:]
- if len(user) > 64 {
- return false
- }
- switch host {
- case "localhost", "example.com":
- return true
- }
- if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
- return false
- }
- if _, err := net.LookupMX(host); err != nil {
- if _, err := net.LookupIP(host); err != nil {
- return false
- }
- }
-
- return true
-}
-
-// IsURL checks if the string is a URL.
-func IsURL(str string) bool {
- if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
- return false
- }
- strTemp := str
- if strings.Contains(str, ":") && !strings.Contains(str, "://") {
-		// support URLs with no scheme indicated but with a colon for the port number;
-		// "http://" is prepended so url.Parse will succeed, and strTemp is used so it does not affect rxURL.MatchString
- strTemp = "http://" + str
- }
- u, err := url.Parse(strTemp)
- if err != nil {
- return false
- }
- if strings.HasPrefix(u.Host, ".") {
- return false
- }
- if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
- return false
- }
- return rxURL.MatchString(str)
-}
-
-// IsRequestURL checks if the string rawurl, assuming
-// it was received in an HTTP request, is a valid
-// URL conforming to RFC 3986.
-func IsRequestURL(rawurl string) bool {
-	url, err := url.ParseRequestURI(rawurl)
-	if err != nil {
-		return false // couldn't even parse the rawurl
-	}
-	if len(url.Scheme) == 0 {
-		return false // no scheme found
- }
- return true
-}
-
-// IsRequestURI checks if the string rawurl, assuming
-// it was received in an HTTP request, is an
-// absolute URI or an absolute path.
-func IsRequestURI(rawurl string) bool {
- _, err := url.ParseRequestURI(rawurl)
- return err == nil
-}
-
-// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
-func IsAlpha(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxAlpha.MatchString(str)
-}
-
-// IsUTFLetter checks if the string contains only unicode letter characters.
-// Similar to IsAlpha but for all languages. Empty string is valid.
-func IsUTFLetter(str string) bool {
- if IsNull(str) {
- return true
- }
-
- for _, c := range str {
- if !unicode.IsLetter(c) {
- return false
- }
- }
- return true
-
-}
-
-// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
-func IsAlphanumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxAlphanumeric.MatchString(str)
-}
-
-// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
-func IsUTFLetterNumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- for _, c := range str {
-		if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters && numbers are ok
- return false
- }
- }
- return true
-
-}
-
-// IsNumeric checks if the string contains only numbers. Empty string is valid.
-func IsNumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxNumeric.MatchString(str)
-}
-
-// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
-// Numbers can be 0-9 but also fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
-func IsUTFNumeric(str string) bool {
- if IsNull(str) {
- return true
- }
- if strings.IndexAny(str, "+-") > 0 {
- return false
- }
- if len(str) > 1 {
- str = strings.TrimPrefix(str, "-")
- str = strings.TrimPrefix(str, "+")
- }
- for _, c := range str {
-		if !unicode.IsNumber(c) { // only numbers are ok; a leading sign was trimmed above
- return false
- }
- }
- return true
-
-}
-
-// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
-func IsUTFDigit(str string) bool {
- if IsNull(str) {
- return true
- }
- if strings.IndexAny(str, "+-") > 0 {
- return false
- }
- if len(str) > 1 {
- str = strings.TrimPrefix(str, "-")
- str = strings.TrimPrefix(str, "+")
- }
- for _, c := range str {
-		if !unicode.IsDigit(c) { // only digits are ok; a leading sign was trimmed above
- return false
- }
- }
- return true
-
-}
-
-// IsHexadecimal checks if the string is a hexadecimal number.
-func IsHexadecimal(str string) bool {
- return rxHexadecimal.MatchString(str)
-}
-
-// IsHexcolor checks if the string is a hexadecimal color.
-func IsHexcolor(str string) bool {
- return rxHexcolor.MatchString(str)
-}
-
-// IsRGBcolor checks if the string is a valid RGB color in the form rgb(RRR, GGG, BBB).
-func IsRGBcolor(str string) bool {
- return rxRGBcolor.MatchString(str)
-}
-
-// IsLowerCase checks if the string is lowercase. Empty string is valid.
-func IsLowerCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return str == strings.ToLower(str)
-}
-
-// IsUpperCase checks if the string is uppercase. Empty string is valid.
-func IsUpperCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return str == strings.ToUpper(str)
-}
-
-// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid.
-func HasLowerCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHasLowerCase.MatchString(str)
-}
-
-// HasUpperCase checks if the string contains at least 1 uppercase character. Empty string is valid.
-func HasUpperCase(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHasUpperCase.MatchString(str)
-}
-
-// IsInt checks if the string is an integer. Empty string is valid.
-func IsInt(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxInt.MatchString(str)
-}
-
-// IsFloat checks if the string is a float.
-func IsFloat(str string) bool {
- return str != "" && rxFloat.MatchString(str)
-}
-
-// IsDivisibleBy checks if the string is a number that's divisible by another.
-// If the second argument is not a valid integer or is zero, it returns false.
-// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
-func IsDivisibleBy(str, num string) bool {
- f, _ := ToFloat(str)
- p := int64(f)
- q, _ := ToInt(num)
- if q == 0 {
- return false
- }
- return (p == 0) || (p%q == 0)
-}
-
-// IsNull checks if the string is null.
-func IsNull(str string) bool {
- return len(str) == 0
-}
-
-// IsNotNull checks if the string is not null.
-func IsNotNull(str string) bool {
- return !IsNull(str)
-}
-
-// HasWhitespaceOnly checks the string only contains whitespace
-func HasWhitespaceOnly(str string) bool {
- return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
-}
-
-// HasWhitespace checks if the string contains any whitespace
-func HasWhitespace(str string) bool {
- return len(str) > 0 && rxHasWhitespace.MatchString(str)
-}
-
-// IsByteLength checks if the string's length (in bytes) falls in a range.
-func IsByteLength(str string, min, max int) bool {
- return len(str) >= min && len(str) <= max
-}
-
-// IsUUIDv3 checks if the string is a UUID version 3.
-func IsUUIDv3(str string) bool {
- return rxUUID3.MatchString(str)
-}
-
-// IsUUIDv4 checks if the string is a UUID version 4.
-func IsUUIDv4(str string) bool {
- return rxUUID4.MatchString(str)
-}
-
-// IsUUIDv5 checks if the string is a UUID version 5.
-func IsUUIDv5(str string) bool {
- return rxUUID5.MatchString(str)
-}
-
-// IsUUID checks if the string is a UUID (version 3, 4 or 5).
-func IsUUID(str string) bool {
- return rxUUID.MatchString(str)
-}
-
-// Byte to index table for O(1) lookups when unmarshaling.
-// We use 0xFF as sentinel value for invalid indexes.
-var ulidDec = [...]byte{
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x01,
- 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
- 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14, 0x15, 0xFF,
- 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C, 0x1D, 0x1E,
- 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0A, 0x0B, 0x0C,
- 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0xFF, 0x12, 0x13, 0xFF, 0x14,
- 0x15, 0xFF, 0x16, 0x17, 0x18, 0x19, 0x1A, 0xFF, 0x1B, 0x1C,
- 0x1D, 0x1E, 0x1F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
-}
-
-// ulidEncodedSize is the length of a text-encoded ULID.
-const ulidEncodedSize = 26
-
-// IsULID checks if the string is a ULID.
-//
-// Implementation got from:
-// https://github.com/oklog/ulid (Apache-2.0 License)
-//
-func IsULID(str string) bool {
- // Check if a base32 encoded ULID is the right length.
- if len(str) != ulidEncodedSize {
- return false
- }
-
- // Check if all the characters in a base32 encoded ULID are part of the
- // expected base32 character set.
- if ulidDec[str[0]] == 0xFF ||
- ulidDec[str[1]] == 0xFF ||
- ulidDec[str[2]] == 0xFF ||
- ulidDec[str[3]] == 0xFF ||
- ulidDec[str[4]] == 0xFF ||
- ulidDec[str[5]] == 0xFF ||
- ulidDec[str[6]] == 0xFF ||
- ulidDec[str[7]] == 0xFF ||
- ulidDec[str[8]] == 0xFF ||
- ulidDec[str[9]] == 0xFF ||
- ulidDec[str[10]] == 0xFF ||
- ulidDec[str[11]] == 0xFF ||
- ulidDec[str[12]] == 0xFF ||
- ulidDec[str[13]] == 0xFF ||
- ulidDec[str[14]] == 0xFF ||
- ulidDec[str[15]] == 0xFF ||
- ulidDec[str[16]] == 0xFF ||
- ulidDec[str[17]] == 0xFF ||
- ulidDec[str[18]] == 0xFF ||
- ulidDec[str[19]] == 0xFF ||
- ulidDec[str[20]] == 0xFF ||
- ulidDec[str[21]] == 0xFF ||
- ulidDec[str[22]] == 0xFF ||
- ulidDec[str[23]] == 0xFF ||
- ulidDec[str[24]] == 0xFF ||
- ulidDec[str[25]] == 0xFF {
- return false
- }
-
- // Check if the first character in a base32 encoded ULID will overflow. This
- // happens because the base32 representation encodes 130 bits, while the
- // ULID is only 128 bits.
- //
- // See https://github.com/oklog/ulid/issues/9 for details.
- if str[0] > '7' {
- return false
- }
- return true
-}
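Reviewer note: the checks above encode the three ULID rules: exactly 26 characters, the Crockford base32 alphabet, and a first character no greater than '7' so the 130-bit text form fits the 128-bit value. For example:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsULID("01AN4Z07BY79KA1307SR9X4MV3")) // true
	fmt.Println(govalidator.IsULID("01AN4Z07BY79KA1307SR9X4MV"))  // false: only 25 chars
	fmt.Println(govalidator.IsULID("81AN4Z07BY79KA1307SR9X4MV3")) // false: first char > '7' overflows
	fmt.Println(govalidator.IsULID("01AN4Z07BY79KA1307SR9X4MVU")) // false: 'U' is not Crockford base32
}
```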
-
-// IsCreditCard checks if the string is a credit card.
-func IsCreditCard(str string) bool {
- sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
- if !rxCreditCard.MatchString(sanitized) {
- return false
- }
- var sum int64
- var digit string
- var tmpNum int64
- var shouldDouble bool
- for i := len(sanitized) - 1; i >= 0; i-- {
- digit = sanitized[i:(i + 1)]
- tmpNum, _ = ToInt(digit)
- if shouldDouble {
- tmpNum *= 2
- if tmpNum >= 10 {
- sum += (tmpNum % 10) + 1
- } else {
- sum += tmpNum
- }
- } else {
- sum += tmpNum
- }
- shouldDouble = !shouldDouble
- }
-
- return sum%10 == 0
-}
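Reviewer note: the loop above is the Luhn checksum: walking right to left, every second digit is doubled (and digit-summed when the double exceeds 9), and the total must be divisible by 10; spaces and dashes are stripped before the format regex and checksum run. A sketch with the common Visa test number:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// 4111 1111 1111 1111 sums to 30 under Luhn, which is divisible by 10.
	fmt.Println(govalidator.IsCreditCard("4111-1111-1111-1111")) // true
	fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1112")) // false: checksum off by one
}
```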
-
-// IsISBN10 checks if the string is an ISBN version 10.
-func IsISBN10(str string) bool {
- return IsISBN(str, 10)
-}
-
-// IsISBN13 checks if the string is an ISBN version 13.
-func IsISBN13(str string) bool {
- return IsISBN(str, 13)
-}
-
-// IsISBN checks if the string is an ISBN (version 10 or 13).
-// If the version value is not equal to 10 or 13, it checks both variants.
-func IsISBN(str string, version int) bool {
- sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
- var checksum int32
- var i int32
- if version == 10 {
- if !rxISBN10.MatchString(sanitized) {
- return false
- }
- for i = 0; i < 9; i++ {
- checksum += (i + 1) * int32(sanitized[i]-'0')
- }
- if sanitized[9] == 'X' {
- checksum += 10 * 10
- } else {
- checksum += 10 * int32(sanitized[9]-'0')
- }
- if checksum%11 == 0 {
- return true
- }
- return false
- } else if version == 13 {
- if !rxISBN13.MatchString(sanitized) {
- return false
- }
- factor := []int32{1, 3}
- for i = 0; i < 12; i++ {
- checksum += factor[i%2] * int32(sanitized[i]-'0')
- }
- return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
- }
- return IsISBN(str, 10) || IsISBN(str, 13)
-}
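Reviewer note: a sketch of the two checksums implemented above, using the textbook examples (ISBN-10: positional weights 1..10, valid iff the sum is divisible by 11; ISBN-13: alternating 1/3 weights with an EAN check digit):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// 1*0+2*3+3*0+4*6+5*4+6*0+7*6+8*1+9*5+10*2 = 165, and 165 % 11 == 0.
	fmt.Println(govalidator.IsISBN10("0-306-40615-2")) // true
	// Alternating 1/3 weights over the first 12 digits give 100, so the check digit is 0.
	fmt.Println(govalidator.IsISBN13("978-3-16-148410-0")) // true
	// Any version other than 10 or 13 tries both.
	fmt.Println(govalidator.IsISBN("978-3-16-148410-0", 0)) // true
}
```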
-
-// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
-func IsJSON(str string) bool {
- var js json.RawMessage
- return json.Unmarshal([]byte(str), &js) == nil
-}
-
-// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
-func IsMultibyte(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxMultibyte.MatchString(str)
-}
-
-// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
-func IsASCII(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxASCII.MatchString(str)
-}
-
-// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
-func IsPrintableASCII(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxPrintableASCII.MatchString(str)
-}
-
-// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
-func IsFullWidth(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxFullWidth.MatchString(str)
-}
-
-// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
-func IsHalfWidth(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHalfWidth.MatchString(str)
-}
-
-// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
-func IsVariableWidth(str string) bool {
- if IsNull(str) {
- return true
- }
- return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
-}
-
-// IsBase64 checks if a string is base64 encoded.
-func IsBase64(str string) bool {
- return rxBase64.MatchString(str)
-}
-
-// IsFilePath checks if a string is a Windows or Unix file path and returns its type.
-func IsFilePath(str string) (bool, int) {
- if rxWinPath.MatchString(str) {
-		// check the Windows path length limit, see:
- // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
- if len(str[3:]) > 32767 {
- return false, Win
- }
- return true, Win
- } else if rxUnixPath.MatchString(str) {
- return true, Unix
- }
- return false, Unknown
-}
-
-// IsWinFilePath checks both relative & absolute paths in Windows.
-func IsWinFilePath(str string) bool {
- if rxARWinPath.MatchString(str) {
-		// check the Windows path length limit, see:
- // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
- if len(str[3:]) > 32767 {
- return false
- }
- return true
- }
- return false
-}
-
-// IsUnixFilePath checks both relative & absolute paths in Unix.
-func IsUnixFilePath(str string) bool {
- if rxARUnixPath.MatchString(str) {
- return true
- }
- return false
-}
-
-// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
-func IsDataURI(str string) bool {
- dataURI := strings.Split(str, ",")
- if !rxDataURI.MatchString(dataURI[0]) {
- return false
- }
- return IsBase64(dataURI[1])
-}
-
-// IsMagnetURI checks if a string is a valid magnet URI.
-func IsMagnetURI(str string) bool {
- return rxMagnetURI.MatchString(str)
-}
-
-// IsISO3166Alpha2 checks if a string is a valid two-letter country code.
-func IsISO3166Alpha2(str string) bool {
- for _, entry := range ISO3166List {
- if str == entry.Alpha2Code {
- return true
- }
- }
- return false
-}
-
-// IsISO3166Alpha3 checks if a string is a valid three-letter country code.
-func IsISO3166Alpha3(str string) bool {
- for _, entry := range ISO3166List {
- if str == entry.Alpha3Code {
- return true
- }
- }
- return false
-}
-
-// IsISO693Alpha2 checks if a string is valid two-letter language code
-func IsISO693Alpha2(str string) bool {
- for _, entry := range ISO693List {
- if str == entry.Alpha2Code {
- return true
- }
- }
- return false
-}
-
-// IsISO693Alpha3b checks if a string is valid three-letter language code
-func IsISO693Alpha3b(str string) bool {
- for _, entry := range ISO693List {
- if str == entry.Alpha3bCode {
- return true
- }
- }
- return false
-}
-
-// IsDNSName will validate the given string as a DNS name
-func IsDNSName(str string) bool {
- if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
- // constraints already violated
- return false
- }
- return !IsIP(str) && rxDNSName.MatchString(str)
-}
-
-// IsHash checks if a string is a hash of type algorithm.
-// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
-func IsHash(str string, algorithm string) bool {
- var len string
- algo := strings.ToLower(algorithm)
-
- if algo == "crc32" || algo == "crc32b" {
- len = "8"
- } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
- len = "32"
- } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
- len = "40"
- } else if algo == "tiger192" {
- len = "48"
- } else if algo == "sha3-224" {
- len = "56"
- } else if algo == "sha256" || algo == "sha3-256" {
- len = "64"
- } else if algo == "sha384" || algo == "sha3-384" {
- len = "96"
- } else if algo == "sha512" || algo == "sha3-512" {
- len = "128"
- } else {
- return false
- }
-
- return Matches(str, "^[a-f0-9]{"+len+"}$")
-}
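Reviewer note: IsHash only validates length and the lowercase-hex alphabet for the named algorithm; it never recomputes a digest. A sketch pairing it with a real SHA-256 sum:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	sum := sha256.Sum256([]byte("hello"))
	digest := hex.EncodeToString(sum[:])

	fmt.Println(govalidator.IsHash(digest, "sha256")) // true: 64 lowercase hex chars
	fmt.Println(govalidator.IsHash(digest, "md5"))    // false: wrong length for md5
	fmt.Println(govalidator.IsHash(digest, "sha999")) // false: unknown algorithm
}
```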
-
-// IsSHA3224 checks if a string is a SHA3-224 hash. Alias for `IsHash(str, "sha3-224")`
-func IsSHA3224(str string) bool {
- return IsHash(str, "sha3-224")
-}
-
-// IsSHA3256 checks if a string is a SHA3-256 hash. Alias for `IsHash(str, "sha3-256")`
-func IsSHA3256(str string) bool {
- return IsHash(str, "sha3-256")
-}
-
-// IsSHA3384 checks if a string is a SHA3-384 hash. Alias for `IsHash(str, "sha3-384")`
-func IsSHA3384(str string) bool {
- return IsHash(str, "sha3-384")
-}
-
-// IsSHA3512 checks if a string is a SHA3-512 hash. Alias for `IsHash(str, "sha3-512")`
-func IsSHA3512(str string) bool {
- return IsHash(str, "sha3-512")
-}
-
-// IsSHA512 checks if a string is a SHA512 hash. Alias for `IsHash(str, "sha512")`
-func IsSHA512(str string) bool {
- return IsHash(str, "sha512")
-}
-
-// IsSHA384 checks if a string is a SHA384 hash. Alias for `IsHash(str, "sha384")`
-func IsSHA384(str string) bool {
- return IsHash(str, "sha384")
-}
-
-// IsSHA256 checks if a string is a SHA256 hash. Alias for `IsHash(str, "sha256")`
-func IsSHA256(str string) bool {
- return IsHash(str, "sha256")
-}
-
-// IsTiger192 checks if a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")`
-func IsTiger192(str string) bool {
- return IsHash(str, "tiger192")
-}
-
-// IsTiger160 checks if a string is a Tiger160 hash. Alias for `IsHash(str, "tiger160")`
-func IsTiger160(str string) bool {
- return IsHash(str, "tiger160")
-}
-
-// IsRipeMD160 checks if a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")`
-func IsRipeMD160(str string) bool {
- return IsHash(str, "ripemd160")
-}
-
-// IsSHA1 checks if a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")`
-func IsSHA1(str string) bool {
- return IsHash(str, "sha1")
-}
-
-// IsTiger128 checks if a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")`
-func IsTiger128(str string) bool {
- return IsHash(str, "tiger128")
-}
-
-// IsRipeMD128 checks if a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")`
-func IsRipeMD128(str string) bool {
- return IsHash(str, "ripemd128")
-}
-
-// IsCRC32 checks if a string is a CRC32 hash. Alias for `IsHash(str, "crc32")`
-func IsCRC32(str string) bool {
- return IsHash(str, "crc32")
-}
-
-// IsCRC32b checks if a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")`
-func IsCRC32b(str string) bool {
- return IsHash(str, "crc32b")
-}
-
-// IsMD5 checks if a string is an MD5 hash. Alias for `IsHash(str, "md5")`
-func IsMD5(str string) bool {
- return IsHash(str, "md5")
-}
-
-// IsMD4 checks if a string is an MD4 hash. Alias for `IsHash(str, "md4")`
-func IsMD4(str string) bool {
- return IsHash(str, "md4")
-}
-
-// IsDialString validates the given string for usage with the various Dial() functions
-func IsDialString(str string) bool {
- if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
- return true
- }
-
- return false
-}
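-
-// Illustrative sketch, not part of the upstream file: IsDialString accepts
-// host:port pairs where the host is a DNS name or IP and the port is 1-65535.
-func ExampleIsDialString() {
-	fmt.Println(IsDialString("example.com:80")) // true
-	fmt.Println(IsDialString("[::1]:8080"))     // true: bracketed IPv6 host
-	fmt.Println(IsDialString("example.com"))    // false: missing port
-}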
-
-// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP`
-func IsIP(str string) bool {
- return net.ParseIP(str) != nil
-}
-
-// IsPort checks if a string represents a valid port
-func IsPort(str string) bool {
- if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
- return true
- }
- return false
-}
-
-// IsIPv4 checks if the string is an IP version 4.
-func IsIPv4(str string) bool {
- ip := net.ParseIP(str)
- return ip != nil && strings.Contains(str, ".")
-}
-
-// IsIPv6 checks if the string is an IP version 6.
-func IsIPv6(str string) bool {
- ip := net.ParseIP(str)
- return ip != nil && strings.Contains(str, ":")
-}
-
-// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
-func IsCIDR(str string) bool {
- _, _, err := net.ParseCIDR(str)
- return err == nil
-}
-
-// IsMAC checks if a string is valid MAC address.
-// Possible MAC formats:
-// 01:23:45:67:89:ab
-// 01:23:45:67:89:ab:cd:ef
-// 01-23-45-67-89-ab
-// 01-23-45-67-89-ab-cd-ef
-// 0123.4567.89ab
-// 0123.4567.89ab.cdef
-func IsMAC(str string) bool {
- _, err := net.ParseMAC(str)
- return err == nil
-}
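-
-// Illustrative sketch, not part of the upstream file: IsMAC defers entirely
-// to net.ParseMAC, so every format that function accepts passes here.
-func ExampleIsMAC() {
-	fmt.Println(IsMAC("01:23:45:67:89:ab")) // true
-	fmt.Println(IsMAC("0123.4567.89ab"))    // true: dotted format
-	fmt.Println(IsMAC("01-23-45-67-89"))    // false: too few octets
-}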
-
-// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name
-func IsHost(str string) bool {
- return IsIP(str) || IsDNSName(str)
-}
-
-// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
-func IsMongoID(str string) bool {
- return rxHexadecimal.MatchString(str) && (len(str) == 24)
-}
-
-// IsLatitude checks if a string is a valid latitude.
-func IsLatitude(str string) bool {
-	return rxLatitude.MatchString(str)
-}
-
-// IsLongitude checks if a string is a valid longitude.
-func IsLongitude(str string) bool {
-	return rxLongitude.MatchString(str)
-}
-
-// IsIMEI checks if a string is a valid IMEI
-func IsIMEI(str string) bool {
-	return rxIMEI.MatchString(str)
-}
-
-// IsIMSI checks if a string is a valid IMSI
-func IsIMSI(str string) bool {
- if !rxIMSI.MatchString(str) {
- return false
- }
-
- mcc, err := strconv.ParseInt(str[0:3], 10, 32)
- if err != nil {
- return false
- }
-
- switch mcc {
- case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219:
- case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235:
- case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257:
- case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278:
- case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293:
- case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314:
- case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346:
- case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364:
- case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402:
- case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417:
- case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428:
- case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441:
- case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467:
- case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528:
- case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545:
- case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555:
- case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611:
- case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621:
- case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631:
- case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641:
- case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652:
- case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708:
- case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736:
- case 738, 740, 742, 744, 746, 748, 750, 995:
- return true
- default:
- return false
- }
- return true
-}
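-
-// Illustrative sketch, not part of the upstream file (and assuming rxIMSI
-// accepts 14-15 digit strings): after the regex check, the leading three
-// digits must be a known mobile country code from the switch above.
-func ExampleIsIMSI() {
-	fmt.Println(IsIMSI("310150123456789")) // true: MCC 310 is in the list
-	fmt.Println(IsIMSI("999150123456789")) // false: MCC 999 is not
-}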
-
-// IsRsaPublicKey checks if a string is a valid RSA public key with the provided bit length
-func IsRsaPublicKey(str string, keylen int) bool {
-	block, _ := pem.Decode([]byte(str))
-	if block != nil && block.Type != "PUBLIC KEY" {
-		return false
-	}
-	var der []byte
-
-	if block != nil {
-		der = block.Bytes
-	} else {
-		b, err := base64.StdEncoding.DecodeString(str)
-		if err != nil {
-			return false
-		}
-		der = b
-	}
-
-	key, err := x509.ParsePKIXPublicKey(der)
-	if err != nil {
-		return false
-	}
-	pubkey, ok := key.(*rsa.PublicKey)
-	if !ok {
-		return false
-	}
-	bitlen := len(pubkey.N.Bytes()) * 8
-	return bitlen == keylen
-}
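-
-// Illustrative sketch, not part of the upstream file; it assumes an extra
-// crypto/rand import for key generation.
-func ExampleIsRsaPublicKey() {
-	key, _ := rsa.GenerateKey(rand.Reader, 2048)
-	der, _ := x509.MarshalPKIXPublicKey(&key.PublicKey)
-	pemStr := string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der}))
-	fmt.Println(IsRsaPublicKey(pemStr, 2048)) // true
-	fmt.Println(IsRsaPublicKey(pemStr, 1024)) // false: modulus is 2048 bits
-}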
-
-// IsRegex checks if a given string is a valid regex with RE2 syntax or not
-func IsRegex(str string) bool {
-	_, err := regexp.Compile(str)
-	return err == nil
-}
-
-func toJSONName(tag string) string {
- if tag == "" {
- return ""
- }
-
- // JSON name always comes first. If there's no options then split[0] is
- // JSON name, if JSON name is not set, then split[0] is an empty string.
- split := strings.SplitN(tag, ",", 2)
-
- name := split[0]
-
- // However it is possible that the field is skipped when
- // (de-)serializing from/to JSON, in which case assume that there is no
- // tag name to use
- if name == "-" {
- return ""
- }
- return name
-}
-
-func prependPathToErrors(err error, path string) error {
- switch err2 := err.(type) {
- case Error:
- err2.Path = append([]string{path}, err2.Path...)
- return err2
- case Errors:
- errors := err2.Errors()
- for i, err3 := range errors {
- errors[i] = prependPathToErrors(err3, path)
- }
- return err2
- }
- return err
-}
-
-// ValidateArray performs validation according to condition iterator that validates every element of the array
-func ValidateArray(array []interface{}, iterator ConditionIterator) bool {
- return Every(array, iterator)
-}
-
-// ValidateMap validates the map s against the validation map m.
-// result will be equal to `false` if there are any errors.
-// s is the map containing the data to be validated.
-// m is the validation map in the form:
-//   map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}}
-func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) {
- if s == nil {
- return true, nil
- }
- result := true
- var err error
- var errs Errors
- var index int
- val := reflect.ValueOf(s)
- for key, value := range s {
- presentResult := true
- validator, ok := m[key]
- if !ok {
- presentResult = false
- var err error
-			err = fmt.Errorf("all map keys have to be present in the validation map; got %s", key)
- err = prependPathToErrors(err, key)
- errs = append(errs, err)
- }
- valueField := reflect.ValueOf(value)
- mapResult := true
- typeResult := true
- structResult := true
- resultField := true
- switch subValidator := validator.(type) {
- case map[string]interface{}:
- var err error
- if v, ok := value.(map[string]interface{}); !ok {
- mapResult = false
- err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String())
- err = prependPathToErrors(err, key)
- errs = append(errs, err)
- } else {
- mapResult, err = ValidateMap(v, subValidator)
- if err != nil {
- mapResult = false
- err = prependPathToErrors(err, key)
- errs = append(errs, err)
- }
- }
- case string:
- if (valueField.Kind() == reflect.Struct ||
- (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
- subValidator != "-" {
- var err error
- structResult, err = ValidateStruct(valueField.Interface())
- if err != nil {
- err = prependPathToErrors(err, key)
- errs = append(errs, err)
- }
- }
- resultField, err = typeCheck(valueField, reflect.StructField{
- Name: key,
- PkgPath: "",
- Type: val.Type(),
- Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)),
- Offset: 0,
- Index: []int{index},
- Anonymous: false,
- }, val, nil)
- if err != nil {
- errs = append(errs, err)
- }
- case nil:
-			// already handled by the presence check above
- default:
- typeResult = false
- err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String())
- err = prependPathToErrors(err, key)
- errs = append(errs, err)
- }
- result = result && presentResult && typeResult && resultField && structResult && mapResult
- index++
- }
- // checks required keys
- requiredResult := true
- for key, value := range m {
- if schema, ok := value.(string); ok {
- tags := parseTagIntoMap(schema)
- if required, ok := tags["required"]; ok {
- if _, ok := s[key]; !ok {
- requiredResult = false
- if required.customErrorMessage != "" {
- err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}}
- } else {
- err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}}
- }
- errs = append(errs, err)
- }
- }
- }
- }
-
- if len(errs) > 0 {
- err = errs
- }
- return result && requiredResult, err
-}
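-
-// Illustrative sketch, not part of the upstream file: every key in the data
-// map must appear in the validation map, and nested maps validate recursively.
-func ExampleValidateMap() {
-	data := map[string]interface{}{
-		"name":    "John",
-		"address": map[string]interface{}{"line1": "221B Baker Street"},
-	}
-	rules := map[string]interface{}{
-		"name":    "required,alpha",
-		"address": map[string]interface{}{"line1": "required"},
-	}
-	ok, err := ValidateMap(data, rules)
-	fmt.Println(ok, err) // true <nil>
-}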
-
-// ValidateStruct validates struct fields based on their `valid` tags.
-// result will be equal to `false` if there are any errors.
-// todo currently there is no guarantee that errors will be returned in predictable order (tests may fail)
-func ValidateStruct(s interface{}) (bool, error) {
- if s == nil {
- return true, nil
- }
- result := true
- var err error
- val := reflect.ValueOf(s)
- if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
- // we only accept structs
- if val.Kind() != reflect.Struct {
- return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
- }
- var errs Errors
- for i := 0; i < val.NumField(); i++ {
- valueField := val.Field(i)
- typeField := val.Type().Field(i)
- if typeField.PkgPath != "" {
- continue // Private field
- }
- structResult := true
- if valueField.Kind() == reflect.Interface {
- valueField = valueField.Elem()
- }
- if (valueField.Kind() == reflect.Struct ||
- (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
- typeField.Tag.Get(tagName) != "-" {
- var err error
- structResult, err = ValidateStruct(valueField.Interface())
- if err != nil {
- err = prependPathToErrors(err, typeField.Name)
- errs = append(errs, err)
- }
- }
- resultField, err2 := typeCheck(valueField, typeField, val, nil)
- if err2 != nil {
-
- // Replace structure name with JSON name if there is a tag on the variable
- jsonTag := toJSONName(typeField.Tag.Get("json"))
- if jsonTag != "" {
- switch jsonError := err2.(type) {
- case Error:
- jsonError.Name = jsonTag
- err2 = jsonError
- case Errors:
- for i2, err3 := range jsonError {
- switch customErr := err3.(type) {
- case Error:
- customErr.Name = jsonTag
- jsonError[i2] = customErr
- }
- }
-
- err2 = jsonError
- }
- }
-
- errs = append(errs, err2)
- }
- result = result && resultField && structResult
- }
- if len(errs) > 0 {
- err = errs
- }
- return result, err
-}
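-
-// Illustrative sketch, not part of the upstream file: field errors are keyed
-// by the JSON name when a json tag is present.
-func ExampleValidateStruct() {
-	type user struct {
-		Email string `json:"email" valid:"email"`
-	}
-	ok, err := ValidateStruct(user{Email: "not-an-email"})
-	fmt.Println(ok)                         // false
-	fmt.Println(ErrorByField(err, "email")) // not-an-email does not validate as email
-}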
-
-// ValidateStructAsync performs async validation of the struct and returns results through the channels
-func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) {
- res := make(chan bool)
- errors := make(chan error)
-
- go func() {
- defer close(res)
- defer close(errors)
-
-		isValid, err := ValidateStruct(s)
-
-		res <- isValid
-		errors <- err
- }()
-
- return res, errors
-}
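-
-// Illustrative sketch, not part of the upstream file: both channels must be
-// drained, result first, or the sending goroutine blocks forever.
-func ExampleValidateStructAsync() {
-	type login struct {
-		Name string `valid:"alpha"`
-	}
-	resCh, errCh := ValidateStructAsync(login{Name: "abc"})
-	fmt.Println(<-resCh, <-errCh) // true <nil>
-}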
-
-// ValidateMapAsync performs async validation of the map and returns results through the channels
-func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) {
- res := make(chan bool)
- errors := make(chan error)
-
- go func() {
- defer close(res)
- defer close(errors)
-
-		isValid, err := ValidateMap(s, m)
-
-		res <- isValid
-		errors <- err
- }()
-
- return res, errors
-}
-
-// parseTagIntoMap parses a struct tag such as `valid:"required~Some error message,length(2|3)"` into a tagOptionsMap, e.g. {"required": "Some error message", "length(2|3)": ""}
-func parseTagIntoMap(tag string) tagOptionsMap {
- optionsMap := make(tagOptionsMap)
- options := strings.Split(tag, ",")
-
- for i, option := range options {
- option = strings.TrimSpace(option)
-
- validationOptions := strings.Split(option, "~")
- if !isValidTag(validationOptions[0]) {
- continue
- }
- if len(validationOptions) == 2 {
- optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
- } else {
- optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
- }
- }
- return optionsMap
-}
-
-func isValidTag(s string) bool {
- if s == "" {
- return false
- }
- for _, c := range s {
- switch {
- case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
- // Backslash and quote chars are reserved, but
- // otherwise any punctuation chars are allowed
- // in a tag name.
- default:
- if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
- return false
- }
- }
- }
- return true
-}
-
-// IsSSN will validate the given string as a U.S. Social Security Number
-func IsSSN(str string) bool {
- if str == "" || len(str) != 11 {
- return false
- }
- return rxSSN.MatchString(str)
-}
-
-// IsSemver checks if string is valid semantic version
-func IsSemver(str string) bool {
- return rxSemver.MatchString(str)
-}
-
-// IsType checks if an interface value is of the given type
-func IsType(v interface{}, params ...string) bool {
- if len(params) == 1 {
- typ := params[0]
- return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1)
- }
- return false
-}
-
-// IsTime checks if string is valid according to given format
-func IsTime(str string, format string) bool {
- _, err := time.Parse(format, str)
- return err == nil
-}
-
-// IsUnixTime checks if a string is a valid unix timestamp value
-func IsUnixTime(str string) bool {
-	_, err := strconv.Atoi(str)
-	return err == nil
-}
-
-// IsRFC3339 checks if string is valid timestamp value according to RFC3339
-func IsRFC3339(str string) bool {
- return IsTime(str, time.RFC3339)
-}
-
-// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone.
-func IsRFC3339WithoutZone(str string) bool {
- return IsTime(str, rfc3339WithoutZone)
-}
-
-// IsISO4217 checks if string is valid ISO currency code
-func IsISO4217(str string) bool {
- for _, currency := range ISO4217List {
- if str == currency {
- return true
- }
- }
-
- return false
-}
-
-// ByteLength checks the string's length in bytes
-func ByteLength(str string, params ...string) bool {
- if len(params) == 2 {
- min, _ := ToInt(params[0])
- max, _ := ToInt(params[1])
- return len(str) >= int(min) && len(str) <= int(max)
- }
-
- return false
-}
-
-// RuneLength checks the string's length in runes
-// Alias for StringLength
-func RuneLength(str string, params ...string) bool {
- return StringLength(str, params...)
-}
-
-// IsRsaPub checks whether the string is a valid RSA public key
-// Alias for IsRsaPublicKey
-func IsRsaPub(str string, params ...string) bool {
-	if len(params) == 1 {
-		keylen, _ := ToInt(params[0])
-		return IsRsaPublicKey(str, int(keylen))
-	}
-
-	return false
-}
-
-// StringMatches checks if a string matches a given pattern.
-func StringMatches(s string, params ...string) bool {
- if len(params) == 1 {
- pattern := params[0]
- return Matches(s, pattern)
- }
- return false
-}
-
-// StringLength checks the string's length in runes (multi-byte strings included)
-func StringLength(str string, params ...string) bool {
-	if len(params) == 2 {
-		strLength := utf8.RuneCountInString(str)
-		min, _ := ToInt(params[0])
-		max, _ := ToInt(params[1])
-		return strLength >= int(min) && strLength <= int(max)
-	}
-
-	return false
-}
-
-// MinStringLength checks the string's minimum length in runes (multi-byte strings included)
-func MinStringLength(str string, params ...string) bool {
-	if len(params) == 1 {
-		strLength := utf8.RuneCountInString(str)
-		min, _ := ToInt(params[0])
-		return strLength >= int(min)
-	}
-
-	return false
-}
-
-// MaxStringLength checks the string's maximum length in runes (multi-byte strings included)
-func MaxStringLength(str string, params ...string) bool {
-	if len(params) == 1 {
-		strLength := utf8.RuneCountInString(str)
-		max, _ := ToInt(params[0])
-		return strLength <= int(max)
-	}
-
-	return false
-}
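-
-// Illustrative sketch, not part of the upstream file: the rune-based checks
-// count multi-byte characters once, unlike the byte-based ByteLength.
-func ExampleStringLength() {
-	fmt.Println(StringLength("héllo", "5", "5")) // true: 5 runes
-	fmt.Println(ByteLength("héllo", "5", "5"))   // false: 6 bytes
-}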
-
-// Range checks whether the string, parsed as a number, falls within the given range
-func Range(str string, params ...string) bool {
- if len(params) == 2 {
- value, _ := ToFloat(str)
- min, _ := ToFloat(params[0])
- max, _ := ToFloat(params[1])
- return InRange(value, min, max)
- }
-
- return false
-}
-
-// IsInRaw checks if the string is in a pipe-separated list of allowed values
-func IsInRaw(str string, params ...string) bool {
- if len(params) == 1 {
- rawParams := params[0]
-
- parsedParams := strings.Split(rawParams, "|")
-
- return IsIn(str, parsedParams...)
- }
-
- return false
-}
-
-// IsIn checks if string str is a member of the set of strings params
-func IsIn(str string, params ...string) bool {
- for _, param := range params {
- if str == param {
- return true
- }
- }
-
- return false
-}
-
-func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) {
- if nilPtrAllowedByRequired {
- k := v.Kind()
- if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() {
- return true, nil
- }
- }
-
- if requiredOption, isRequired := options["required"]; isRequired {
- if len(requiredOption.customErrorMessage) > 0 {
- return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}}
- } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional {
- return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}}
- }
- // not required and empty is valid
- return true, nil
-}
-
-func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) {
- if !v.IsValid() {
- return false, nil
- }
-
- tag := t.Tag.Get(tagName)
-
- // checks if the field should be ignored
- switch tag {
- case "":
- if v.Kind() != reflect.Slice && v.Kind() != reflect.Map {
- if !fieldsRequiredByDefault {
- return true, nil
- }
- return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}}
- }
- case "-":
- return true, nil
- }
-
- isRootType := false
- if options == nil {
- isRootType = true
- options = parseTagIntoMap(tag)
- }
-
- if isEmptyValue(v) {
- // an empty value is not validated, checks only required
- isValid, resultErr = checkRequired(v, t, options)
- for key := range options {
- delete(options, key)
- }
- return isValid, resultErr
- }
-
- var customTypeErrors Errors
- optionsOrder := options.orderedKeys()
- for _, validatorName := range optionsOrder {
- validatorStruct := options[validatorName]
- if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok {
- delete(options, validatorName)
-
- if result := validatefunc(v.Interface(), o.Interface()); !result {
- if len(validatorStruct.customErrorMessage) > 0 {
- customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)})
- continue
- }
- customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)})
- }
- }
- }
-
- if len(customTypeErrors.Errors()) > 0 {
- return false, customTypeErrors
- }
-
- if isRootType {
-		// Ensure that we've checked the value with all specified validators before reporting that the value is valid
- defer func() {
- delete(options, "optional")
- delete(options, "required")
-
- if isValid && resultErr == nil && len(options) != 0 {
- optionsOrder := options.orderedKeys()
- for _, validator := range optionsOrder {
- isValid = false
- resultErr = Error{t.Name, fmt.Errorf(
- "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}}
- return
- }
- }
- }()
- }
-
- for _, validatorSpec := range optionsOrder {
- validatorStruct := options[validatorSpec]
- var negate bool
- validator := validatorSpec
- customMsgExists := len(validatorStruct.customErrorMessage) > 0
-
- // checks whether the tag looks like '!something' or 'something'
- if validator[0] == '!' {
- validator = validator[1:]
- negate = true
- }
-
- // checks for interface param validators
- for key, value := range InterfaceParamTagRegexMap {
- ps := value.FindStringSubmatch(validator)
- if len(ps) == 0 {
- continue
- }
-
- validatefunc, ok := InterfaceParamTagMap[key]
- if !ok {
- continue
- }
-
- delete(options, validatorSpec)
-
- field := fmt.Sprint(v)
- if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) {
- if customMsgExists {
- return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- if negate {
- return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- }
- }
-
- switch v.Kind() {
- case reflect.Bool,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
- reflect.Float32, reflect.Float64,
- reflect.String:
- // for each tag option checks the map of validator functions
- for _, validatorSpec := range optionsOrder {
- validatorStruct := options[validatorSpec]
- var negate bool
- validator := validatorSpec
- customMsgExists := len(validatorStruct.customErrorMessage) > 0
-
- // checks whether the tag looks like '!something' or 'something'
- if validator[0] == '!' {
- validator = validator[1:]
- negate = true
- }
-
- // checks for param validators
- for key, value := range ParamTagRegexMap {
- ps := value.FindStringSubmatch(validator)
- if len(ps) == 0 {
- continue
- }
-
- validatefunc, ok := ParamTagMap[key]
- if !ok {
- continue
- }
-
- delete(options, validatorSpec)
-
- switch v.Kind() {
- case reflect.String,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Float32, reflect.Float64:
-
- field := fmt.Sprint(v) // make value into string, then validate with regex
- if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) {
- if customMsgExists {
- return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- if negate {
- return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- default:
- // type not yet supported, fail
- return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}}
- }
- }
-
- if validatefunc, ok := TagMap[validator]; ok {
- delete(options, validatorSpec)
-
- switch v.Kind() {
- case reflect.String,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
- reflect.Float32, reflect.Float64:
- field := fmt.Sprint(v) // make value into string, then validate with regex
- if result := validatefunc(field); !result && !negate || result && negate {
- if customMsgExists {
- return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- if negate {
- return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}}
- }
- default:
-					// type not yet supported, fail here
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
- return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
- }
- }
- }
- return true, nil
- case reflect.Map:
- if v.Type().Key().Kind() != reflect.String {
- return false, &UnsupportedTypeError{v.Type()}
- }
-		sv := stringValues(v.MapKeys())
-		sort.Sort(sv)
- result := true
- for i, k := range sv {
- var resultItem bool
- var err error
- if v.MapIndex(k).Kind() != reflect.Struct {
- resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
- if err != nil {
- return false, err
- }
- } else {
- resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
- if err != nil {
- err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
- return false, err
- }
- }
- result = result && resultItem
- }
- return result, nil
- case reflect.Slice, reflect.Array:
- result := true
- for i := 0; i < v.Len(); i++ {
- var resultItem bool
- var err error
- if v.Index(i).Kind() != reflect.Struct {
- resultItem, err = typeCheck(v.Index(i), t, o, options)
- if err != nil {
- return false, err
- }
- } else {
- resultItem, err = ValidateStruct(v.Index(i).Interface())
- if err != nil {
- err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
- return false, err
- }
- }
- result = result && resultItem
- }
- return result, nil
- case reflect.Interface:
-		// If the value is an interface then validate its element
- if v.IsNil() {
- return true, nil
- }
- return ValidateStruct(v.Interface())
- case reflect.Ptr:
-		// If the value is a pointer then check its element
- if v.IsNil() {
- return true, nil
- }
- return typeCheck(v.Elem(), t, o, options)
- case reflect.Struct:
- return true, nil
- default:
- return false, &UnsupportedTypeError{v.Type()}
- }
-}
-
-func stripParams(validatorString string) string {
- return paramsRegexp.ReplaceAllString(validatorString, "")
-}
-
-// isEmptyValue checks whether the value is empty or not
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.String, reflect.Array:
- return v.Len() == 0
- case reflect.Map, reflect.Slice:
- return v.Len() == 0 || v.IsNil()
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
-
- return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
-}
-
-// ErrorByField returns the error for the specified field of the struct
-// validated by ValidateStruct, or an empty string if there are no errors
-// or this field doesn't exist or doesn't have any errors.
-func ErrorByField(e error, field string) string {
- if e == nil {
- return ""
- }
- return ErrorsByField(e)[field]
-}
-
-// ErrorsByField returns map of errors of the struct validated
-// by ValidateStruct or empty map if there are no errors.
-func ErrorsByField(e error) map[string]string {
- m := make(map[string]string)
- if e == nil {
- return m
- }
-
- switch e := e.(type) {
- case Error:
- m[e.Name] = e.Err.Error()
- case Errors:
- for _, item := range e.Errors() {
- n := ErrorsByField(item)
- for k, v := range n {
- m[k] = v
- }
- }
- }
-
- return m
-}
-
-// Error returns string equivalent for reflect.Type
-func (e *UnsupportedTypeError) Error() string {
- return "validator: unsupported type: " + e.Type.String()
-}
-
-func (sv stringValues) Len() int { return len(sv) }
-func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
-func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
-func (sv stringValues) get(i int) string { return sv[i].String() }
-
-// IsE164 checks if a string is a valid E.164 formatted phone number
-func IsE164(str string) bool {
-	return rxE164.MatchString(str)
-}
diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml
deleted file mode 100644
index bc5f7b086..000000000
--- a/vendor/github.com/asaskevich/govalidator/wercker.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-box: golang
-build:
- steps:
- - setup-go-workspace
-
- - script:
- name: go get
- code: |
- go version
- go get -t ./...
-
- - script:
- name: go test
- code: |
- go test -race -v ./...
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
new file mode 100644
index 000000000..899129ecc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go
new file mode 100644
index 000000000..6504a2186
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go
@@ -0,0 +1,18 @@
+package aws
+
+// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing.
+type AccountIDEndpointMode string
+
+const (
+ // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing
+ AccountIDEndpointModeUnset AccountIDEndpointMode = ""
+
+ // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present
+ AccountIDEndpointModePreferred = "preferred"
+
+ // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity
+ AccountIDEndpointModeRequired = "required"
+
+ // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing
+ AccountIDEndpointModeDisabled = "disabled"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
new file mode 100644
index 000000000..fe63fedad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/arn/arn.go
@@ -0,0 +1,92 @@
+// Package arn provides a parser for interacting with Amazon Resource Names.
+package arn
+
+import (
+ "errors"
+ "strings"
+)
+
+const (
+ arnDelimiter = ":"
+ arnSections = 6
+ arnPrefix = "arn:"
+
+ // zero-indexed
+ sectionPartition = 1
+ sectionService = 2
+ sectionRegion = 3
+ sectionAccountID = 4
+ sectionResource = 5
+
+ // errors
+ invalidPrefix = "arn: invalid prefix"
+ invalidSections = "arn: not enough sections"
+)
+
+// ARN captures the individual fields of an Amazon Resource Name.
+// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information.
+type ARN struct {
+ // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in
+ // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China
+ // (Beijing) region is "aws-cn".
+ Partition string
+
+ // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of
+ // namespaces, see
+ // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces.
+ Service string
+
+ // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this
+ // component might be omitted.
+ Region string
+
+ // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the
+ // ARNs for some resources don't require an account number, so this component might be omitted.
+ AccountID string
+
+	// The content of this part of the ARN varies by service. It often includes an indicator of the type of resource
+	// (for example, an IAM user or Amazon RDS database), followed by a slash (/) or a colon (:), followed by the
+	// resource name itself. Some services allow paths for resource names, as described in
+	// http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths.
+ Resource string
+}
+
+// Parse parses an ARN into its constituent parts.
+//
+// Some example ARNs:
+// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment
+// arn:aws:iam::123456789012:user/David
+// arn:aws:rds:eu-west-1:123456789012:db:mysql-db
+// arn:aws:s3:::my_corporate_bucket/exampleobject.png
+func Parse(arn string) (ARN, error) {
+ if !strings.HasPrefix(arn, arnPrefix) {
+ return ARN{}, errors.New(invalidPrefix)
+ }
+ sections := strings.SplitN(arn, arnDelimiter, arnSections)
+ if len(sections) != arnSections {
+ return ARN{}, errors.New(invalidSections)
+ }
+ return ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountID: sections[sectionAccountID],
+ Resource: sections[sectionResource],
+ }, nil
+}
+
+// IsARN returns whether the given string is an ARN, determined by checking
+// that it starts with "arn:" and contains enough colon-delimited sections.
+func IsARN(arn string) bool {
+ return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
+}
+
+// String returns the canonical representation of the ARN
+func (arn ARN) String() string {
+ return arnPrefix +
+ arn.Partition + arnDelimiter +
+ arn.Service + arnDelimiter +
+ arn.Region + arnDelimiter +
+ arn.AccountID + arnDelimiter +
+ arn.Resource
+}
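+
+// ExampleParse is an illustrative sketch, not part of the upstream file; it
+// assumes an extra fmt import. Everything after the fifth colon is the
+// resource, so embedded colons are preserved.
+func ExampleParse() {
+	a, err := Parse("arn:aws:rds:eu-west-1:123456789012:db:mysql-db")
+	fmt.Println(err)        // <nil>
+	fmt.Println(a.Service)  // rds
+	fmt.Println(a.Resource) // db:mysql-db
+}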
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go
new file mode 100644
index 000000000..4152caade
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go
@@ -0,0 +1,33 @@
+package aws
+
+// RequestChecksumCalculation controls request checksum calculation workflow
+type RequestChecksumCalculation int
+
+const (
+ // RequestChecksumCalculationUnset is the unset value for RequestChecksumCalculation
+ RequestChecksumCalculationUnset RequestChecksumCalculation = iota
+
+ // RequestChecksumCalculationWhenSupported indicates request checksum will be calculated
+ // if the operation supports input checksums
+ RequestChecksumCalculationWhenSupported
+
+ // RequestChecksumCalculationWhenRequired indicates request checksum will be calculated
+ // if required by the operation or if user elects to set a checksum algorithm in request
+ RequestChecksumCalculationWhenRequired
+)
+
+// ResponseChecksumValidation controls response checksum validation workflow
+type ResponseChecksumValidation int
+
+const (
+ // ResponseChecksumValidationUnset is the unset value for ResponseChecksumValidation
+ ResponseChecksumValidationUnset ResponseChecksumValidation = iota
+
+ // ResponseChecksumValidationWhenSupported indicates response checksum will be validated
+ // if the operation supports output checksums
+ ResponseChecksumValidationWhenSupported
+
+ // ResponseChecksumValidationWhenRequired indicates response checksum will only
+ // be validated if the operation requires output checksum validation
+ ResponseChecksumValidationWhenRequired
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
new file mode 100644
index 000000000..3219517da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -0,0 +1,250 @@
+package aws
+
+import (
+ "net/http"
+
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPClient provides the interface to provide custom HTTPClients. Generally
+// *http.Client is sufficient for most use cases. The HTTPClient should not
+// follow 301 or 302 redirects.
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// A Config provides service configuration for service clients.
+type Config struct {
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for
+ // information on AWS regions.
+ Region string
+
+ // The credentials object to use when signing requests.
+ // Use the LoadDefaultConfig to load configuration from all the SDK's supported
+ // sources, and resolve credentials using the SDK's default credential chain.
+ Credentials CredentialsProvider
+
+ // The Bearer Authentication token provider to use for authenticating API
+	// operation calls with a Bearer Authentication token. The API client and
+	// operation must support the Bearer Authentication scheme in order for the
+	// token provider to be used. API clients created with NewFromConfig will
+	// automatically be configured with this option, if the API client supports
+	// Bearer Authentication.
+ //
+ // The SDK's config.LoadDefaultConfig can automatically populate this
+ // option for external configuration options such as SSO session.
+ // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+ BearerAuthTokenProvider smithybearer.TokenProvider
+
+ // The HTTP Client the SDK's API clients will use to invoke HTTP requests.
+ // The SDK defaults to a BuildableClient allowing API clients to create
+ // copies of the HTTP Client for service specific customizations.
+ //
+ // Use a (*http.Client) for custom behavior. Using a custom http.Client
+ // will prevent the SDK from modifying the HTTP client.
+ HTTPClient HTTPClient
+
+ // An endpoint resolver that can be used to provide or override an endpoint
+ // for the given service and region.
+ //
+ // See the `aws.EndpointResolver` documentation for additional usage
+ // information.
+ //
+ // Deprecated: See Config.EndpointResolverWithOptions
+ EndpointResolver EndpointResolver
+
+ // An endpoint resolver that can be used to provide or override an endpoint
+ // for the given service and region.
+ //
+ // When EndpointResolverWithOptions is specified, it will be used by a
+ // service client rather than using EndpointResolver if also specified.
+ //
+ // See the `aws.EndpointResolverWithOptions` documentation for additional
+ // usage information.
+ //
+ // Deprecated: with the release of endpoint resolution v2 in API clients,
+ // EndpointResolver and EndpointResolverWithOptions are deprecated.
+ // Providing a value for this field will likely prevent you from using
+ // newer endpoint-related service features. See API client options
+ // EndpointResolverV2 and BaseEndpoint.
+ EndpointResolverWithOptions EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client
+	// will make when calling an operation that fails with a retryable error.
+	//
+	// API clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if
+	// Retryer is not nil.
+ RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with.
+	//
+	// API clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if
+	// Retryer is not nil.
+ RetryMode RetryMode
+
+ // Retryer is a function that provides a Retryer implementation. A Retryer
+ // guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ //
+ // In general, the provider function should return a new instance of a
+ // Retryer if you are attempting to provide a consistent Retryer
+ // configuration across all clients. This will ensure that each client will
+ // be provided a new instance of the Retryer implementation, and will avoid
+ // issues such as sharing the same retry token bucket across services.
+ //
+ // If not nil, RetryMaxAttempts, and RetryMode will be ignored by API
+ // clients.
+ Retryer func() Retryer
+
+ // ConfigSources are the sources that were used to construct the Config.
+ // Allows for additional configuration to be loaded by clients.
+ ConfigSources []interface{}
+
+	// APIOptions provides the set of middleware mutations that modify how the
+	// API client's requests will be handled. This is useful for adding additional
+ // tracing data to a request, or changing behavior of the SDK's client.
+ APIOptions []func(*middleware.Stack) error
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard error.
+ Logger logging.Logger
+
+ // Configures the events that will be sent to the configured logger. This
+ // can be used to configure the logging of signing, retries, request, and
+ // responses of the SDK clients.
+ //
+ // See the ClientLogMode type documentation for the complete set of logging
+ // modes and available configuration.
+ ClientLogMode ClientLogMode
+
+ // The configured DefaultsMode. If not specified, service clients will
+ // default to legacy.
+ //
+ // Supported modes are: auto, cross-region, in-region, legacy, mobile,
+ // standard
+ DefaultsMode DefaultsMode
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode
+ // is set to DefaultsModeAuto and is initialized by
+ // `config.LoadDefaultConfig`. You should not populate this structure
+ // programmatically, or rely on the values here within your applications.
+ RuntimeEnvironment RuntimeEnvironment
+
+ // AppId is an optional application specific identifier that can be set.
+ // When set it will be appended to the User-Agent header of every request
+ // in the form of App/{AppId}. This variable is sourced from environment
+ // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
+ // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for
+ // more information on environment variables and shared config settings.
+ AppID string
+
+ // BaseEndpoint is an intermediary transfer location to a service specific
+ // BaseEndpoint on a service's Options.
+ BaseEndpoint *string
+
+ // DisableRequestCompression toggles if an operation request could be
+ // compressed or not. Will be set to false by default. This variable is sourced from
+ // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute
+ // disable_request_compression
+ DisableRequestCompression bool
+
+ // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be
+ // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively.
+ // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or
+ // the shared config profile attribute request_min_compression_size_bytes
+ RequestMinCompressSizeBytes int64
+
+ // Controls how a resolved AWS account ID is handled for endpoint routing.
+ AccountIDEndpointMode AccountIDEndpointMode
+
+ // RequestChecksumCalculation determines when request checksum calculation is performed.
+ //
+ // There are two possible values for this setting:
+ //
+ // 1. RequestChecksumCalculationWhenSupported (default): The checksum is always calculated
+ // if the operation supports it, regardless of whether the user sets an algorithm in the request.
+ //
+ // 2. RequestChecksumCalculationWhenRequired: The checksum is only calculated if the user
+ // explicitly sets a checksum algorithm in the request.
+ //
+ // This setting is sourced from the environment variable AWS_REQUEST_CHECKSUM_CALCULATION
+ // or the shared config profile attribute "request_checksum_calculation".
+ RequestChecksumCalculation RequestChecksumCalculation
+
+ // ResponseChecksumValidation determines when response checksum validation is performed.
+ //
+ // There are two possible values for this setting:
+ //
+ // 1. ResponseChecksumValidationWhenSupported (default): The checksum is always validated
+ // if the operation supports it, regardless of whether the user sets the validation mode to ENABLED in request.
+ //
+ // 2. ResponseChecksumValidationWhenRequired: The checksum is only validated if the user
+ // explicitly sets the validation mode to ENABLED in the request.
+ //
+ // This variable is sourced from the environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or
+ // the shared config profile attribute "response_checksum_validation".
+ ResponseChecksumValidation ResponseChecksumValidation
+
+ // Registry of HTTP interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // Priority list of preferred auth scheme IDs.
+ AuthSchemePreference []string
+
+ // ServiceOptions provides service specific configuration options that will be applied
+ // when constructing clients for specific services. Each callback function receives the service ID
+ // and the service's Options struct, allowing for dynamic configuration based on the service.
+ ServiceOptions []func(string, any)
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// Copy will return a shallow copy of the Config object.
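+//
+// A minimal usage sketch, constructing a Config and copying it before
+// per-client mutation (the region value is illustrative):
+//
+//	cfg := aws.NewConfig()
+//	cfg.Region = "us-west-2"
+//	cfg.ClientLogMode = aws.LogRetries | aws.LogRequest
+//
+//	cp := cfg.Copy() // shallow: slice and map fields still share backing storage
+//	cp.RetryMaxAttempts = 5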
+func (c Config) Copy() Config {
+ cp := c
+ return cp
+}
+
+// EndpointDiscoveryEnableState indicates whether endpoint discovery is in an
+// enabled, disabled, auto, or unset state.
+//
+// Default behavior (Auto or Unset) indicates operations that require endpoint
+// discovery will use Endpoint Discovery by default. Operations that
+// optionally use Endpoint Discovery will not use Endpoint Discovery
+// unless EndpointDiscovery is explicitly enabled.
+type EndpointDiscoveryEnableState uint
+
+// Enumeration values for EndpointDiscoveryEnableState
+const (
+ // EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset.
+ // Users do not need to use this value explicitly. The behavior for unset
+ // is the same as for EndpointDiscoveryAuto.
+ EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota
+
+ // EndpointDiscoveryAuto represents an AUTO state that allows endpoint
+ // discovery only when required by the API. This is the default
+ // configuration resolved by the client if endpoint discovery is neither
+ // enabled nor disabled.
+ EndpointDiscoveryAuto // default state
+
+ // EndpointDiscoveryDisabled indicates the client MUST not perform endpoint
+ // discovery even when required.
+ EndpointDiscoveryDisabled
+
+ // EndpointDiscoveryEnabled indicates the client MUST always perform endpoint
+ // discovery if supported for the operation.
+ EndpointDiscoveryEnabled
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
new file mode 100644
index 000000000..4d8e26ef3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
@@ -0,0 +1,22 @@
+package aws
+
+import (
+ "context"
+ "time"
+)
+
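+// suppressedContext wraps a context.Context, suppressing its deadline and
+// cancellation signals while still exposing its values. CredentialsCache uses
+// it so that a credential refresh shared between concurrent callers is not
+// canceled by any single caller's context.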
+type suppressedContext struct {
+ context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+ return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+ return nil
+}
+
+func (s *suppressedContext) Err() error {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
new file mode 100644
index 000000000..623890e8d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
@@ -0,0 +1,235 @@
+package aws
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+)
+
+// CredentialsCacheOptions are the options for configuring the CredentialsCache.
+type CredentialsCacheOptions struct {
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // An ExpiryWindow of 10s would cause calls to Expired() to return true
+ // 10 seconds before the credentials are actually expired. This can cause an
+ // increased number of requests to refresh the credentials to occur.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // ExpiryWindowJitterFrac provides a mechanism for randomizing the
+ // expiration of credentials within the configured ExpiryWindow by a random
+ // percentage. Valid values are between 0.0 and 1.0.
+ //
+ // As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
+ // is 0.5 then credentials will be set to expire between 30 to 60 seconds
+ // prior to their actual expiration time.
+ //
+ // If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+ // If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+ // If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+ // If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+ ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency safe credentials retrieval
+// via the provider's retrieve method.
+//
+// CredentialsCache will look for optional interfaces on the Provider to adjust
+// how the credential cache handles credentials caching.
+//
+// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
+// credential refresh failures. This could return an updated Credentials
+// value, or attempt another means of retrieving credentials.
+//
+// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
+// credentials Expires is modified. This could modify how the Credentials
+// Expires is adjusted based on the CredentialsCache ExpiryWindow option,
+// such as providing a floor below which Expires will not be reduced.
+type CredentialsCache struct {
+ provider CredentialsProvider
+
+ options CredentialsCacheOptions
+ creds atomic.Value
+ sf singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
+// is expected to not be nil. A variadic list of one or more functions can be
+// provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
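+//
+// A minimal usage sketch, wrapping an existing provider (the window values
+// are illustrative):
+//
+//	cache := aws.NewCredentialsCache(provider, func(o *aws.CredentialsCacheOptions) {
+//		o.ExpiryWindow = 5 * time.Minute
+//		o.ExpiryWindowJitterFrac = 0.5
+//	})
+//	creds, err := cache.Retrieve(context.TODO())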
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+ options := CredentialsCacheOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.ExpiryWindow < 0 {
+ options.ExpiryWindow = 0
+ }
+
+ if options.ExpiryWindowJitterFrac < 0 {
+ options.ExpiryWindowJitterFrac = 0
+ } else if options.ExpiryWindowJitterFrac > 1 {
+ options.ExpiryWindowJitterFrac = 1
+ }
+
+ return &CredentialsCache{
+ provider: provider,
+ options: options,
+ }
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved and are not expired, the cached credentials will be returned. If
+// the credentials have not been retrieved yet, or have expired, the provider's
+// Retrieve method will be called.
+//
+// Returns an error if the provider's Retrieve method returns an error.
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+ if creds, ok := p.getCreds(); ok && !creds.Expired() {
+ return creds, nil
+ }
+
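+ // Deduplicate concurrent refreshes via singleflight so all callers share a
+ // single in-flight retrieve. The suppressed context keeps one caller's
+ // cancellation or deadline from aborting the shared refresh.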
+ resCh := p.sf.DoChan("", func() (interface{}, error) {
+ return p.singleRetrieve(&suppressedContext{ctx})
+ })
+ select {
+ case res := <-resCh:
+ return res.Val.(Credentials), res.Err
+ case <-ctx.Done():
+ return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+ }
+}
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+ currCreds, ok := p.getCreds()
+ if ok && !currCreds.Expired() {
+ return currCreds, nil
+ }
+
+ newCreds, err := p.provider.Retrieve(ctx)
+ if err != nil {
+ handleFailToRefresh := defaultHandleFailToRefresh
+ if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
+ handleFailToRefresh = cs.HandleFailToRefresh
+ }
+ newCreds, err = handleFailToRefresh(ctx, currCreds, err)
+ if err != nil {
+ return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
+ }
+ }
+
+ if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
+ adjustExpiresBy := defaultAdjustExpiresBy
+ if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
+ adjustExpiresBy = cs.AdjustExpiresBy
+ }
+
+ randFloat64, err := sdkrand.CryptoRandFloat64()
+ if err != nil {
+ return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
+ }
+
+ var jitter time.Duration
+ if p.options.ExpiryWindowJitterFrac > 0 {
+ jitter = time.Duration(randFloat64 *
+ p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+ }
+
+ newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
+ if err != nil {
+ return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
+ }
+ }
+
+ p.creds.Store(&newCreds)
+ return newCreds, nil
+}
+
+// getCreds returns the currently stored credentials and true. It returns
+// false if no credentials are stored.
+func (p *CredentialsCache) getCreds() (Credentials, bool) {
+ v := p.creds.Load()
+ if v == nil {
+ return Credentials{}, false
+ }
+
+ c := v.(*Credentials)
+ if c == nil || !c.HasKeys() {
+ return Credentials{}, false
+ }
+
+ return *c, true
+}
+
+// ProviderSources returns a list of where the underlying credential provider
+// has been sourced, if available. Returns an empty slice if the provider
+// doesn't implement the interface.
+func (p *CredentialsCache) ProviderSources() []CredentialSource {
+ asSource, ok := p.provider.(CredentialProviderSource)
+ if !ok {
+ return []CredentialSource{}
+ }
+ return asSource.ProviderSources()
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+ p.creds.Store((*Credentials)(nil))
+}
+
+// IsCredentialsProvider returns whether the credential provider wrapped by the
+// CredentialsCache matches the target provider type.
+func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
+ return IsCredentialsProvider(p.provider, target)
+}
+
+// HandleFailRefreshCredentialsCacheStrategy is an interface for
+// CredentialsCache that allows a CredentialsProvider to customize how a
+// failure to refresh credentials is handled.
+type HandleFailRefreshCredentialsCacheStrategy interface {
+ // Given the previously cached Credentials, if any, and the refresh error,
+ // may return a new or modified set of Credentials, or an error.
+ //
+ // Credential caches may use default implementation if nil.
+ HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
+}
+
+// defaultHandleFailToRefresh returns the passed in error.
+func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
+ return Credentials{}, err
+}
+
+// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialsCache
+// that allows a CredentialsProvider to intercept adjustments to Credentials
+// expiry based on the expectations and use cases of the CredentialsProvider.
+//
+// Credential caches may use default implementation if nil.
+type AdjustExpiresByCredentialsCacheStrategy interface {
+ // Given a Credentials value as input, applies any mutations and
+ // returns the potentially updated Credentials, or an error.
+ AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
+}
+
+// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires,
+// and returns the updated credentials value. If Credentials value's CanExpire
+// is false, the passed in credentials are returned unchanged.
+func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
+ if !creds.CanExpire {
+ return creds, nil
+ }
+
+ creds.Expires = creds.Expires.Add(dur)
+ return creds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
new file mode 100644
index 000000000..4ad2ee440
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -0,0 +1,230 @@
+package aws
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// AnonymousCredentials provides a sentinel CredentialsProvider that should be
+// used to instruct the SDK's signing middleware to not sign the request.
+//
+// Using `nil` credentials when configuring an API client will achieve the same
+// result. The AnonymousCredentials type allows you to configure the SDK's
+// external config loading to not attempt to source credentials from the shared
+// config or environment.
+//
+// For example you can use this CredentialsProvider with an API client's
+// Options to instruct the client not to sign a request for accessing public
+// S3 bucket objects.
+//
+// The following example demonstrates using the AnonymousCredentials to prevent
+// the SDK's external config loading from attempting to resolve credentials.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO(),
+// config.WithCredentialsProvider(aws.AnonymousCredentials{}),
+// )
+// if err != nil {
+// log.Fatalf("failed to load config, %v", err)
+// }
+//
+// client := s3.NewFromConfig(cfg)
+//
+// Alternatively you can leave the API client Options' `Credentials` member as
+// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
+// the `Credentials` member to nil, if the external config resolved a
+// credential provider.
+//
+// client := s3.New(s3.Options{
+// // Credentials defaults to a nil value.
+// })
+//
+// This can also be configured for specific operation calls.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// log.Fatalf("failed to load config, %v", err)
+// }
+//
+// client := s3.NewFromConfig(cfg)
+//
+// result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+// Bucket: aws.String("example-bucket"),
+// Key: aws.String("example-key"),
+// }, func(o *s3.Options) {
+// o.Credentials = nil
+// // Or
+// o.Credentials = aws.AnonymousCredentials{}
+// })
+type AnonymousCredentials struct{}
+
+// Retrieve implements the CredentialsProvider interface, but will always
+// return an error, and cannot be used to sign a request. The AnonymousCredentials
+// type is used as a sentinel type instructing the AWS request signing
+// middleware to not sign a request.
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
+ return Credentials{Source: "AnonymousCredentials"},
+ fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
+}
+
+// CredentialSource is the source of the credential provider.
+// A provider can have multiple credential sources: for example, a provider that reads a profile, calls ECS to
+// get credentials, and then assumes a role using STS will have all of these as part of its provider chain.
+type CredentialSource int
+
+const (
+ // CredentialSourceUndefined is the sentinel zero value
+ CredentialSourceUndefined CredentialSource = iota
+ // CredentialSourceCode credentials resolved from code, cli parameters, session object, or client instance
+ CredentialSourceCode
+ // CredentialSourceEnvVars credentials resolved from environment variables
+ CredentialSourceEnvVars
+ // CredentialSourceEnvVarsSTSWebIDToken credentials resolved from environment variables for assuming a role with STS using a web identity token
+ CredentialSourceEnvVarsSTSWebIDToken
+ // CredentialSourceSTSAssumeRole credentials resolved from STS using AssumeRole
+ CredentialSourceSTSAssumeRole
+ // CredentialSourceSTSAssumeRoleSaml credentials resolved from STS using assume role with SAML
+ CredentialSourceSTSAssumeRoleSaml
+ // CredentialSourceSTSAssumeRoleWebID credentials resolved from STS using assume role with web identity
+ CredentialSourceSTSAssumeRoleWebID
+ // CredentialSourceSTSFederationToken credentials resolved from STS using a federation token
+ CredentialSourceSTSFederationToken
+ // CredentialSourceSTSSessionToken credentials resolved from STS using a session token
+ CredentialSourceSTSSessionToken
+ // CredentialSourceProfile credentials resolved from a config file(s) profile with static credentials
+ CredentialSourceProfile
+ // CredentialSourceProfileSourceProfile credentials resolved from a source profile in a config file(s) profile
+ CredentialSourceProfileSourceProfile
+ // CredentialSourceProfileNamedProvider credentials resolved from a named provider in a config file(s) profile (like EcsContainer)
+ CredentialSourceProfileNamedProvider
+ // CredentialSourceProfileSTSWebIDToken credentials resolved from configuration for assuming a role with STS using web identity token in a config file(s) profile
+ CredentialSourceProfileSTSWebIDToken
+ // CredentialSourceProfileSSO credentials resolved from an SSO session in a config file(s) profile
+ CredentialSourceProfileSSO
+ // CredentialSourceSSO credentials resolved from an SSO session
+ CredentialSourceSSO
+ // CredentialSourceProfileSSOLegacy credentials resolved from an SSO session in a config file(s) profile using legacy format
+ CredentialSourceProfileSSOLegacy
+ // CredentialSourceSSOLegacy credentials resolved from an SSO session using legacy format
+ CredentialSourceSSOLegacy
+ // CredentialSourceProfileProcess credentials resolved from a process in a config file(s) profile
+ CredentialSourceProfileProcess
+ // CredentialSourceProcess credentials resolved from a process
+ CredentialSourceProcess
+ // CredentialSourceHTTP credentials resolved from an HTTP endpoint
+ CredentialSourceHTTP
+ // CredentialSourceIMDS credentials resolved from the instance metadata service (IMDS)
+ CredentialSourceIMDS
+)
+
+// A Credentials is the AWS credentials value, composed of individual credential fields.
+type Credentials struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Source of the credentials
+ Source string
+
+ // States if the credentials can expire or not.
+ CanExpire bool
+
+ // The time the credentials will expire at. Should be ignored if CanExpire
+ // is false.
+ Expires time.Time
+
+ // The ID of the account for the credentials.
+ AccountID string
+}
+
+// Expired returns whether the credentials have expired.
+func (v Credentials) Expired() bool {
+ if v.CanExpire {
+ // Calling Round(0) on the current time will truncate the monotonic
+ // reading only. Ensures credential expiry time is always based on
+ // reported wall-clock time.
+ return !v.Expires.After(sdk.NowTime().Round(0))
+ }
+
+ return false
+}
+
+// HasKeys returns whether the credentials' keys are set.
+func (v Credentials) HasKeys() bool {
+ return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component that provides
+// Credentials values. A CredentialsProvider is required to manage its own
+// Expired state, and what being expired means.
+//
+// A credentials provider implementation can be wrapped with a CredentialsCache
+// to cache the credential value retrieved. Without the cache the SDK will
+// attempt to retrieve the credentials for every request.
+type CredentialsProvider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // An error is returned if the value was not obtainable, or was empty.
+ Retrieve(ctx context.Context) (Credentials, error)
+}
+
+// CredentialProviderSource allows any credential provider to track
+// all of the providers from which a credential provider was sourced. For example, if the credentials came from a
+// call to a role specified in the profile, this method will give the whole breadcrumb trail.
+type CredentialProviderSource interface {
+ ProviderSources() []CredentialSource
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
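+//
+// For example, a static provider can be written inline (the key values are
+// placeholders):
+//
+//	provider := aws.CredentialsProviderFunc(
+//		func(ctx context.Context) (aws.Credentials, error) {
+//			return aws.Credentials{
+//				AccessKeyID:     "AKID",
+//				SecretAccessKey: "SECRET",
+//				Source:          "inline provider",
+//			}, nil
+//		})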
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
+func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
+ return fn(ctx)
+}
+
+type isCredentialsProvider interface {
+ IsCredentialsProvider(CredentialsProvider) bool
+}
+
+// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the
+// implementation type.
+//
+// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating
+// whether target matches the credential provider type.
+//
+// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used:
+//
+// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
+// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
+// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
+// If provider is of type *T and target is of type *V, true if type *T is the same as type *V, otherwise false
+func IsCredentialsProvider(provider, target CredentialsProvider) bool {
+ if target == nil || provider == nil {
+ return provider == target
+ }
+
+ if x, ok := provider.(isCredentialsProvider); ok {
+ return x.IsCredentialsProvider(target)
+ }
+
+ targetType := reflect.TypeOf(target)
+ if targetType.Kind() != reflect.Ptr {
+ targetType = reflect.PtrTo(targetType)
+ }
+
+ providerType := reflect.TypeOf(provider)
+ if providerType.Kind() != reflect.Ptr {
+ providerType = reflect.PtrTo(providerType)
+ }
+
+ return targetType.AssignableTo(providerType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
new file mode 100644
index 000000000..fd408e518
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
@@ -0,0 +1,38 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "runtime"
+ "strings"
+)
+
+var getGOOS = func() string {
+ return runtime.GOOS
+}
+
+// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
+// is set to aws.DefaultsModeAuto.
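+//
+// For example (the environment values are illustrative):
+//
+//	mode := ResolveDefaultsModeAuto("us-east-1", aws.RuntimeEnvironment{
+//		EnvironmentIdentifier: "EC2",
+//		Region:                "us-east-1",
+//	})
+//	// mode == aws.DefaultsModeInRegion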
+func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
+ goos := getGOOS()
+ if goos == "android" || goos == "ios" {
+ return aws.DefaultsModeMobile
+ }
+
+ var currentRegion string
+ if len(environment.EnvironmentIdentifier) > 0 {
+ currentRegion = environment.Region
+ }
+
+ if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
+ currentRegion = environment.EC2InstanceMetadataRegion
+ }
+
+ if len(region) > 0 && len(currentRegion) > 0 {
+ if strings.EqualFold(region, currentRegion) {
+ return aws.DefaultsModeInRegion
+ }
+ return aws.DefaultsModeCrossRegion
+ }
+
+ return aws.DefaultsModeStandard
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
new file mode 100644
index 000000000..8b7e01fa2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
@@ -0,0 +1,43 @@
+package defaults
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// Configuration is the set of SDK configuration options that are determined based
+// on the configured DefaultsMode.
+type Configuration struct {
+ // RetryMode is the configuration's default retry mode API clients should
+ // use for constructing a Retryer.
+ RetryMode aws.RetryMode
+
+ // ConnectTimeout is the maximum amount of time a dial will wait for
+ // a connect to complete.
+ //
+ // See https://pkg.go.dev/net#Dialer.Timeout
+ ConnectTimeout *time.Duration
+
+ // TLSNegotiationTimeout specifies the maximum amount of time to wait
+ // for a TLS handshake.
+ //
+ // See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
+ TLSNegotiationTimeout *time.Duration
+}
+
+// GetConnectTimeout returns the ConnectTimeout value, returning false if the value is not set.
+func (c *Configuration) GetConnectTimeout() (time.Duration, bool) {
+ if c.ConnectTimeout == nil {
+ return 0, false
+ }
+ return *c.ConnectTimeout, true
+}
+
+// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returning false if the value is not set.
+func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) {
+ if c.TLSNegotiationTimeout == nil {
+ return 0, false
+ }
+ return *c.TLSNegotiationTimeout, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
new file mode 100644
index 000000000..dbaa873dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
@@ -0,0 +1,50 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT.
+
+package defaults
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "time"
+)
+
+// GetModeConfiguration returns the default Configuration descriptor for the given mode.
+//
+// Supports the following modes: cross-region, in-region, mobile, standard
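+//
+// A minimal usage sketch:
+//
+//	settings, err := GetModeConfiguration(aws.DefaultsModeInRegion)
+//	if err != nil {
+//		// unsupported mode
+//	}
+//	if timeout, ok := settings.GetConnectTimeout(); ok {
+//		_ = timeout // 1100ms for the in-region mode
+//	}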
+func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) {
+ var mv aws.DefaultsMode
+ mv.SetFromString(string(mode))
+
+ switch mv {
+ case aws.DefaultsModeCrossRegion:
+ settings := Configuration{
+ ConnectTimeout: aws.Duration(3100 * time.Millisecond),
+ RetryMode: aws.RetryMode("standard"),
+ TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+ }
+ return settings, nil
+ case aws.DefaultsModeInRegion:
+ settings := Configuration{
+ ConnectTimeout: aws.Duration(1100 * time.Millisecond),
+ RetryMode: aws.RetryMode("standard"),
+ TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond),
+ }
+ return settings, nil
+ case aws.DefaultsModeMobile:
+ settings := Configuration{
+ ConnectTimeout: aws.Duration(30000 * time.Millisecond),
+ RetryMode: aws.RetryMode("standard"),
+ TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond),
+ }
+ return settings, nil
+ case aws.DefaultsModeStandard:
+ settings := Configuration{
+ ConnectTimeout: aws.Duration(3100 * time.Millisecond),
+ RetryMode: aws.RetryMode("standard"),
+ TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond),
+ }
+ return settings, nil
+ default:
+ return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
new file mode 100644
index 000000000..2d90011b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
@@ -0,0 +1,2 @@
+// Package defaults provides recommended configuration values for AWS SDKs and CLIs.
+package defaults
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
new file mode 100644
index 000000000..fcf9387c2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
@@ -0,0 +1,95 @@
+// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT.
+
+package aws
+
+import (
+ "strings"
+)
+
+// DefaultsMode is the SDK defaults mode setting.
+type DefaultsMode string
+
+// The DefaultsMode constants.
+const (
+ // DefaultsModeAuto is an experimental mode that builds on the standard mode.
+ // The SDK will attempt to discover the execution environment to determine the
+ // appropriate settings automatically.
+ //
+ // Note that the auto detection is heuristics-based and does not guarantee 100%
+ // accuracy. STANDARD mode will be used if the execution environment cannot
+ // be determined. The auto detection might query EC2 Instance Metadata service
+ // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
+ // which might introduce latency. Therefore we recommend choosing an explicit
+ // defaults_mode instead if startup latency is critical to your application.
+ DefaultsModeAuto DefaultsMode = "auto"
+
+ // DefaultsModeCrossRegion builds on the standard mode and includes optimization
+ // tailored for applications which call AWS services in a different region
+ //
+ // Note that the default values vended from this mode might change as best practices
+ // may evolve. As a result, it is encouraged to perform tests when upgrading
+ // the SDK
+ DefaultsModeCrossRegion DefaultsMode = "cross-region"
+
+ // DefaultsModeInRegion builds on the standard mode and includes optimization
+ // tailored for applications which call AWS services from within the same AWS
+ // region
+ //
+ // Note that the default values vended from this mode might change as best practices
+ // may evolve. As a result, it is encouraged to perform tests when upgrading
+ // the SDK
+ DefaultsModeInRegion DefaultsMode = "in-region"
+
+ // DefaultsModeLegacy provides default settings that vary per SDK and were used
+ // prior to establishment of defaults_mode
+ DefaultsModeLegacy DefaultsMode = "legacy"
+
+ // DefaultsModeMobile builds on the standard mode and includes optimization
+ // tailored for mobile applications
+ //
+ // Note that the default values vended from this mode might change as best practices
+ // may evolve. As a result, it is encouraged to perform tests when upgrading
+ // the SDK
+ DefaultsModeMobile DefaultsMode = "mobile"
+
+ // DefaultsModeStandard provides the latest recommended default values that
+ // should be safe to run in most scenarios
+ //
+ // Note that the default values vended from this mode might change as best practices
+ // may evolve. As a result, it is encouraged to perform tests when upgrading
+ // the SDK
+ DefaultsModeStandard DefaultsMode = "standard"
+)
+
+// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
+// the provided string when compared using EqualFold. If the value does not match a known
+// constant it will be set as-is and the function will return false. As a special case, if the
+// provided value is a zero-length string, the mode will be set to DefaultsModeLegacy.
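+//
+// For example:
+//
+//	var mode DefaultsMode
+//	ok := mode.SetFromString("IN-REGION") // ok == true, mode == DefaultsModeInRegion
+//	ok = mode.SetFromString("bogus")      // ok == false, mode == DefaultsMode("bogus")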
+func (d *DefaultsMode) SetFromString(v string) (ok bool) {
+ switch {
+ case strings.EqualFold(v, string(DefaultsModeAuto)):
+ *d = DefaultsModeAuto
+ ok = true
+ case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
+ *d = DefaultsModeCrossRegion
+ ok = true
+ case strings.EqualFold(v, string(DefaultsModeInRegion)):
+ *d = DefaultsModeInRegion
+ ok = true
+ case strings.EqualFold(v, string(DefaultsModeLegacy)):
+ *d = DefaultsModeLegacy
+ ok = true
+ case strings.EqualFold(v, string(DefaultsModeMobile)):
+ *d = DefaultsModeMobile
+ ok = true
+ case strings.EqualFold(v, string(DefaultsModeStandard)):
+ *d = DefaultsModeStandard
+ ok = true
+ case len(v) == 0:
+ *d = DefaultsModeLegacy
+ ok = true
+ default:
+ *d = DefaultsMode(v)
+ }
+ return ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
new file mode 100644
index 000000000..d8b6e09e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
@@ -0,0 +1,62 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// # Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to a scalar value, and
+// dereferencing a pointer, easier.
+//
+// Each conversion utility comes in two forms: Value to Pointer and Pointer to Value.
+// The Pointer to Value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.ToString(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// maps and slices of commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.ToStringSlice(strPtrs)
+//
+// # SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 000000000..99edbf3ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,247 @@
+package aws
+
+import (
+ "fmt"
+)
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+type DualStackEndpointState uint
+
+const (
+ // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
+ DualStackEndpointStateUnset DualStackEndpointState = iota
+
+ // DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
+ DualStackEndpointStateEnabled
+
+ // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+ DualStackEndpointStateDisabled
+)
+
+// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState.
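+//
+// Any options value exposing the accessor is matched; for example (the
+// options type here is a hypothetical sketch):
+//
+//	type resolverOptions struct{ state DualStackEndpointState }
+//
+//	func (o resolverOptions) GetUseDualStackEndpoint() DualStackEndpointState {
+//		return o.state
+//	}
+//
+//	state, found := GetUseDualStackEndpoint(resolverOptions{state: DualStackEndpointStateEnabled})
+//	// found == true, state == DualStackEndpointStateEnabled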
+func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
+ type iface interface {
+ GetUseDualStackEndpoint() DualStackEndpointState
+ }
+ for _, option := range options {
+ if i, ok := option.(iface); ok {
+ value = i.GetUseDualStackEndpoint()
+ found = true
+ break
+ }
+ }
+ return value, found
+}
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+ // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+ FIPSEndpointStateUnset FIPSEndpointState = iota
+
+ // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+ FIPSEndpointStateEnabled
+
+ // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+ FIPSEndpointStateDisabled
+)
+
+// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState.
+func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
+ type iface interface {
+ GetUseFIPSEndpoint() FIPSEndpointState
+ }
+ for _, option := range options {
+ if i, ok := option.(iface); ok {
+ value = i.GetUseFIPSEndpoint()
+ found = true
+ break
+ }
+ }
+ return value, found
+}
+
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolver. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+//
+// Deprecated: This structure was used with the global [EndpointResolver]
+// interface, which has been deprecated in favor of service-specific endpoint
+// resolution. See the deprecation docs on that interface for more information.
+type Endpoint struct {
+ // The base URL endpoint the SDK API clients will use to make API calls to.
+ // The SDK will suffix URI path and query elements to this endpoint.
+ URL string
+
+ // Specifies if the endpoint's hostname can be modified by the SDK's API
+ // client.
+ //
+ // If the hostname is mutable the SDK API clients may modify any part of
+ // the hostname based on the requirements of the API (e.g. adding or
+ // removing content in the hostname), such as the Amazon S3 API client
+ // prefixing "bucketname" to the hostname, or changing the
+ // hostname service name component from "s3." to "s3-accesspoint.dualstack."
+ // for the dualstack endpoint of an S3 Accesspoint resource.
+ //
+ // Care should be taken when providing a custom endpoint for an API. If the
+ // endpoint hostname is mutable, and the client cannot modify the endpoint
+ // correctly, the operation call will most likely fail, or have undefined
+ // behavior.
+ //
+ // If hostname is immutable, the SDK API clients will not modify the
+ // hostname of the URL. This may cause the API client not to function
+ // correctly if the API requires the operation specific hostname values
+ // to be used by the client.
+ //
+ // This flag does not modify the API client's behavior if this endpoint
+ // will be used instead of Endpoint Discovery, or if the endpoint will be
+ // used to perform Endpoint Discovery. That behavior is configured via the
+ // API Client's Options.
+ HostnameImmutable bool
+
+ // The AWS partition the endpoint belongs to.
+ PartitionID string
+
+ // The service name that should be used for signing the requests to the
+ // endpoint.
+ SigningName string
+
+ // The region that should be used for signing the request to the endpoint.
+ SigningRegion string
+
+ // The signing method that should be used for signing the requests to the
+ // endpoint.
+ SigningMethod string
+
+ // The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+ // When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+ // If source is not provided when providing a custom endpoint, the SDK may not
+ // perform required host mutations correctly. Source should be used along with
+ // the HostnameImmutable property as per the usage requirement.
+ Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+//
+// Deprecated: The global [Endpoint] structure is deprecated.
+type EndpointSource int
+
+const (
+ // EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
+ EndpointSourceServiceMetadata EndpointSource = iota
+
+ // EndpointSourceCustom denotes the endpoint is a custom endpoint. This source should be used when
+ // the user provides a custom endpoint to be used by the SDK.
+ EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fall back and attempt to use its internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+ Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+ return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+ return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. The API
+// for endpoint resolution is now unique to each service and is set via the
+// EndpointResolverV2 field on service client options. Setting a value for
+// EndpointResolver on aws.Config or service client options will prevent you
+// from using any endpoint-related service features released after the
+// introduction of EndpointResolverV2. You may also encounter broken or
+// unexpected behavior when using the old global interface with services that
+// use many endpoint-related customizations such as S3.
+type EndpointResolver interface {
+ ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+ return e(service, region)
+}
+
+// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
+// API clients will fallback to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
+type EndpointResolverWithOptions interface {
+ ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
+}
+
+// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
+type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
+ return e(service, region, options...)
+}
+
+// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
+// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS value.
+func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
+ type iface interface {
+ GetDisableHTTPS() bool
+ }
+ for _, option := range options {
+ if i, ok := option.(iface); ok {
+ value = i.GetDisableHTTPS()
+ found = true
+ break
+ }
+ }
+ return value, found
+}
+
+// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
+// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion value.
+func GetResolvedRegion(options ...interface{}) (value string, found bool) {
+ type iface interface {
+ GetResolvedRegion() string
+ }
+ for _, option := range options {
+ if i, ok := option.(iface); ok {
+ value = i.GetResolvedRegion()
+ found = true
+ break
+ }
+ }
+ return value, found
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
new file mode 100644
index 000000000..f390a08f9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
@@ -0,0 +1,9 @@
+package aws
+
+// MissingRegionError is an error that is returned if region configuration
+// value was not found.
+type MissingRegionError struct{}
+
+func (*MissingRegionError) Error() string {
+ return "an AWS region is required, but was not found"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
new file mode 100644
index 000000000..2394418e9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
@@ -0,0 +1,365 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+ "github.com/aws/smithy-go/ptr"
+ "time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+ return ptr.ToBool(p)
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+ return ptr.ToBoolSlice(vs)
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+ return ptr.ToBoolMap(vs)
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+ return ptr.ToByte(p)
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+ return ptr.ToByteSlice(vs)
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+ return ptr.ToByteMap(vs)
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+ return ptr.ToString(p)
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string {
+ return ptr.ToStringSlice(vs)
+}
+
+// ToStringMap returns a map of string values, that are
+// dereferenced if the passed in pointer was not nil. The string
+// zero value is used if the pointer was nil.
+func ToStringMap(vs map[string]*string) map[string]string {
+ return ptr.ToStringMap(vs)
+}
+
+// ToInt returns int value dereferenced if the passed
+// in pointer was not nil. Returns a int zero value if the
+// pointer was nil.
+func ToInt(p *int) (v int) {
+ return ptr.ToInt(p)
+}
+
+// ToIntSlice returns a slice of int values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int
+// zero value if the pointer was nil.
+func ToIntSlice(vs []*int) []int {
+ return ptr.ToIntSlice(vs)
+}
+
+// ToIntMap returns a map of int values, that are
+// dereferenced if the passed in pointer was not nil. The int
+// zero value is used if the pointer was nil.
+func ToIntMap(vs map[string]*int) map[string]int {
+ return ptr.ToIntMap(vs)
+}
+
+// ToInt8 returns int8 value dereferenced if the passed
+// in pointer was not nil. Returns a int8 zero value if the
+// pointer was nil.
+func ToInt8(p *int8) (v int8) {
+ return ptr.ToInt8(p)
+}
+
+// ToInt8Slice returns a slice of int8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int8
+// zero value if the pointer was nil.
+func ToInt8Slice(vs []*int8) []int8 {
+ return ptr.ToInt8Slice(vs)
+}
+
+// ToInt8Map returns a map of int8 values, that are
+// dereferenced if the passed in pointer was not nil. The int8
+// zero value is used if the pointer was nil.
+func ToInt8Map(vs map[string]*int8) map[string]int8 {
+ return ptr.ToInt8Map(vs)
+}
+
+// ToInt16 returns int16 value dereferenced if the passed
+// in pointer was not nil. Returns a int16 zero value if the
+// pointer was nil.
+func ToInt16(p *int16) (v int16) {
+ return ptr.ToInt16(p)
+}
+
+// ToInt16Slice returns a slice of int16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int16
+// zero value if the pointer was nil.
+func ToInt16Slice(vs []*int16) []int16 {
+ return ptr.ToInt16Slice(vs)
+}
+
+// ToInt16Map returns a map of int16 values, that are
+// dereferenced if the passed in pointer was not nil. The int16
+// zero value is used if the pointer was nil.
+func ToInt16Map(vs map[string]*int16) map[string]int16 {
+ return ptr.ToInt16Map(vs)
+}
+
+// ToInt32 returns int32 value dereferenced if the passed
+// in pointer was not nil. Returns a int32 zero value if the
+// pointer was nil.
+func ToInt32(p *int32) (v int32) {
+ return ptr.ToInt32(p)
+}
+
+// ToInt32Slice returns a slice of int32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int32
+// zero value if the pointer was nil.
+func ToInt32Slice(vs []*int32) []int32 {
+ return ptr.ToInt32Slice(vs)
+}
+
+// ToInt32Map returns a map of int32 values, that are
+// dereferenced if the passed in pointer was not nil. The int32
+// zero value is used if the pointer was nil.
+func ToInt32Map(vs map[string]*int32) map[string]int32 {
+ return ptr.ToInt32Map(vs)
+}
+
+// ToInt64 returns int64 value dereferenced if the passed
+// in pointer was not nil. Returns a int64 zero value if the
+// pointer was nil.
+func ToInt64(p *int64) (v int64) {
+ return ptr.ToInt64(p)
+}
+
+// ToInt64Slice returns a slice of int64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a int64
+// zero value if the pointer was nil.
+func ToInt64Slice(vs []*int64) []int64 {
+ return ptr.ToInt64Slice(vs)
+}
+
+// ToInt64Map returns a map of int64 values, that are
+// dereferenced if the passed in pointer was not nil. The int64
+// zero value is used if the pointer was nil.
+func ToInt64Map(vs map[string]*int64) map[string]int64 {
+ return ptr.ToInt64Map(vs)
+}
+
+// ToUint returns uint value dereferenced if the passed
+// in pointer was not nil. Returns a uint zero value if the
+// pointer was nil.
+func ToUint(p *uint) (v uint) {
+ return ptr.ToUint(p)
+}
+
+// ToUintSlice returns a slice of uint values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint
+// zero value if the pointer was nil.
+func ToUintSlice(vs []*uint) []uint {
+ return ptr.ToUintSlice(vs)
+}
+
+// ToUintMap returns a map of uint values, that are
+// dereferenced if the passed in pointer was not nil. The uint
+// zero value is used if the pointer was nil.
+func ToUintMap(vs map[string]*uint) map[string]uint {
+ return ptr.ToUintMap(vs)
+}
+
+// ToUint8 returns uint8 value dereferenced if the passed
+// in pointer was not nil. Returns a uint8 zero value if the
+// pointer was nil.
+func ToUint8(p *uint8) (v uint8) {
+ return ptr.ToUint8(p)
+}
+
+// ToUint8Slice returns a slice of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint8
+// zero value if the pointer was nil.
+func ToUint8Slice(vs []*uint8) []uint8 {
+ return ptr.ToUint8Slice(vs)
+}
+
+// ToUint8Map returns a map of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. The uint8
+// zero value is used if the pointer was nil.
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
+ return ptr.ToUint8Map(vs)
+}
+
+// ToUint16 returns uint16 value dereferenced if the passed
+// in pointer was not nil. Returns a uint16 zero value if the
+// pointer was nil.
+func ToUint16(p *uint16) (v uint16) {
+ return ptr.ToUint16(p)
+}
+
+// ToUint16Slice returns a slice of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint16
+// zero value if the pointer was nil.
+func ToUint16Slice(vs []*uint16) []uint16 {
+ return ptr.ToUint16Slice(vs)
+}
+
+// ToUint16Map returns a map of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. The uint16
+// zero value is used if the pointer was nil.
+func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
+ return ptr.ToUint16Map(vs)
+}
+
+// ToUint32 returns uint32 value dereferenced if the passed
+// in pointer was not nil. Returns a uint32 zero value if the
+// pointer was nil.
+func ToUint32(p *uint32) (v uint32) {
+ return ptr.ToUint32(p)
+}
+
+// ToUint32Slice returns a slice of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint32
+// zero value if the pointer was nil.
+func ToUint32Slice(vs []*uint32) []uint32 {
+ return ptr.ToUint32Slice(vs)
+}
+
+// ToUint32Map returns a map of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. The uint32
+// zero value is used if the pointer was nil.
+func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
+ return ptr.ToUint32Map(vs)
+}
+
+// ToUint64 returns uint64 value dereferenced if the passed
+// in pointer was not nil. Returns a uint64 zero value if the
+// pointer was nil.
+func ToUint64(p *uint64) (v uint64) {
+ return ptr.ToUint64(p)
+}
+
+// ToUint64Slice returns a slice of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint64
+// zero value if the pointer was nil.
+func ToUint64Slice(vs []*uint64) []uint64 {
+ return ptr.ToUint64Slice(vs)
+}
+
+// ToUint64Map returns a map of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. The uint64
+// zero value is used if the pointer was nil.
+func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
+ return ptr.ToUint64Map(vs)
+}
+
+// ToFloat32 returns float32 value dereferenced if the passed
+// in pointer was not nil. Returns a float32 zero value if the
+// pointer was nil.
+func ToFloat32(p *float32) (v float32) {
+ return ptr.ToFloat32(p)
+}
+
+// ToFloat32Slice returns a slice of float32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float32
+// zero value if the pointer was nil.
+func ToFloat32Slice(vs []*float32) []float32 {
+ return ptr.ToFloat32Slice(vs)
+}
+
+// ToFloat32Map returns a map of float32 values, that are
+// dereferenced if the passed in pointer was not nil. The float32
+// zero value is used if the pointer was nil.
+func ToFloat32Map(vs map[string]*float32) map[string]float32 {
+ return ptr.ToFloat32Map(vs)
+}
+
+// ToFloat64 returns float64 value dereferenced if the passed
+// in pointer was not nil. Returns a float64 zero value if the
+// pointer was nil.
+func ToFloat64(p *float64) (v float64) {
+ return ptr.ToFloat64(p)
+}
+
+// ToFloat64Slice returns a slice of float64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float64
+// zero value if the pointer was nil.
+func ToFloat64Slice(vs []*float64) []float64 {
+ return ptr.ToFloat64Slice(vs)
+}
+
+// ToFloat64Map returns a map of float64 values, that are
+// dereferenced if the passed in pointer was not nil. The float64
+// zero value is used if the pointer was nil.
+func ToFloat64Map(vs map[string]*float64) map[string]float64 {
+ return ptr.ToFloat64Map(vs)
+}
+
+// ToTime returns time.Time value dereferenced if the passed
+// in pointer was not nil. Returns a time.Time zero value if the
+// pointer was nil.
+func ToTime(p *time.Time) (v time.Time) {
+ return ptr.ToTime(p)
+}
+
+// ToTimeSlice returns a slice of time.Time values that are
+// dereferenced where the passed-in pointers were not nil. The time.Time
+// zero value is used for each pointer that was nil.
+func ToTimeSlice(vs []*time.Time) []time.Time {
+ return ptr.ToTimeSlice(vs)
+}
+
+// ToTimeMap returns a map of time.Time values that are
+// dereferenced where the passed-in pointers were not nil. The time.Time
+// zero value is used for each pointer that was nil.
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
+ return ptr.ToTimeMap(vs)
+}
+
+// ToDuration returns the time.Duration value dereferenced if the passed-in
+// pointer was not nil. Returns a time.Duration zero value if the
+// pointer was nil.
+func ToDuration(p *time.Duration) (v time.Duration) {
+ return ptr.ToDuration(p)
+}
+
+// ToDurationSlice returns a slice of time.Duration values that are
+// dereferenced where the passed-in pointers were not nil. The time.Duration
+// zero value is used for each pointer that was nil.
+func ToDurationSlice(vs []*time.Duration) []time.Duration {
+ return ptr.ToDurationSlice(vs)
+}
+
+// ToDurationMap returns a map of time.Duration values that are
+// dereferenced where the passed-in pointers were not nil. The time.Duration
+// zero value is used for each pointer that was nil.
+func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
+ return ptr.ToDurationMap(vs)
+}
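+
+// Illustrative usage (a sketch, not part of the generated API surface):
+// these helpers make optional pointer fields safe to read without
+// explicit nil checks, e.g.:
+//
+// var p *time.Duration
+// _ = aws.ToDuration(p) // yields 0s instead of a nil-pointer panic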
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
new file mode 100644
index 000000000..372d0f983
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package aws
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.39.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go
new file mode 100644
index 000000000..91c94d987
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go
@@ -0,0 +1,119 @@
+// Code generated by aws/logging_generate.go DO NOT EDIT.
+
+package aws
+
+// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
+// each bit is a flag that describes the logging behavior for one or more client components.
+// The entire 64-bit group is reserved for later expansion by the SDK.
+//
+// Example: Setting ClientLogMode to enable logging of retries and requests
+//
+// clientLogMode := aws.LogRetries | aws.LogRequest
+//
+// Example: Adding an additional log mode to an existing ClientLogMode value
+//
+// clientLogMode |= aws.LogResponse
+type ClientLogMode uint64
+
+// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
+const (
+ LogSigning ClientLogMode = 1 << (64 - 1 - iota)
+ LogRetries
+ LogRequest
+ LogRequestWithBody
+ LogResponse
+ LogResponseWithBody
+ LogDeprecatedUsage
+ LogRequestEventMessage
+ LogResponseEventMessage
+)
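+
+// Illustrative usage: mode bits can be combined, tested, and cleared, e.g.:
+//
+// mode := aws.LogRetries | aws.LogRequest
+// _ = mode.IsRetries() // true
+// mode.ClearRetries()
+// _ = mode.IsRetries() // false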
+
+// IsSigning returns whether the Signing logging mode bit is set
+func (m ClientLogMode) IsSigning() bool {
+ return m&LogSigning != 0
+}
+
+// IsRetries returns whether the Retries logging mode bit is set
+func (m ClientLogMode) IsRetries() bool {
+ return m&LogRetries != 0
+}
+
+// IsRequest returns whether the Request logging mode bit is set
+func (m ClientLogMode) IsRequest() bool {
+ return m&LogRequest != 0
+}
+
+// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set
+func (m ClientLogMode) IsRequestWithBody() bool {
+ return m&LogRequestWithBody != 0
+}
+
+// IsResponse returns whether the Response logging mode bit is set
+func (m ClientLogMode) IsResponse() bool {
+ return m&LogResponse != 0
+}
+
+// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set
+func (m ClientLogMode) IsResponseWithBody() bool {
+ return m&LogResponseWithBody != 0
+}
+
+// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set
+func (m ClientLogMode) IsDeprecatedUsage() bool {
+ return m&LogDeprecatedUsage != 0
+}
+
+// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set
+func (m ClientLogMode) IsRequestEventMessage() bool {
+ return m&LogRequestEventMessage != 0
+}
+
+// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set
+func (m ClientLogMode) IsResponseEventMessage() bool {
+ return m&LogResponseEventMessage != 0
+}
+
+// ClearSigning clears the Signing logging mode bit
+func (m *ClientLogMode) ClearSigning() {
+ *m &^= LogSigning
+}
+
+// ClearRetries clears the Retries logging mode bit
+func (m *ClientLogMode) ClearRetries() {
+ *m &^= LogRetries
+}
+
+// ClearRequest clears the Request logging mode bit
+func (m *ClientLogMode) ClearRequest() {
+ *m &^= LogRequest
+}
+
+// ClearRequestWithBody clears the RequestWithBody logging mode bit
+func (m *ClientLogMode) ClearRequestWithBody() {
+ *m &^= LogRequestWithBody
+}
+
+// ClearResponse clears the Response logging mode bit
+func (m *ClientLogMode) ClearResponse() {
+ *m &^= LogResponse
+}
+
+// ClearResponseWithBody clears the ResponseWithBody logging mode bit
+func (m *ClientLogMode) ClearResponseWithBody() {
+ *m &^= LogResponseWithBody
+}
+
+// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit
+func (m *ClientLogMode) ClearDeprecatedUsage() {
+ *m &^= LogDeprecatedUsage
+}
+
+// ClearRequestEventMessage clears the RequestEventMessage logging mode bit
+func (m *ClientLogMode) ClearRequestEventMessage() {
+ *m &^= LogRequestEventMessage
+}
+
+// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit
+func (m *ClientLogMode) ClearResponseEventMessage() {
+ *m &^= LogResponseEventMessage
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go
new file mode 100644
index 000000000..6ecc2231a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go
@@ -0,0 +1,95 @@
+//go:build clientlogmode
+// +build clientlogmode
+
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "strings"
+ "text/template"
+)
+
+var config = struct {
+ ModeBits []string
+}{
+ // Items should be appended only to keep bit-flag positions stable
+ ModeBits: []string{
+ "Signing",
+ "Retries",
+ "Request",
+ "RequestWithBody",
+ "Response",
+ "ResponseWithBody",
+ "DeprecatedUsage",
+ "RequestEventMessage",
+ "ResponseEventMessage",
+ },
+}
+
+func bitName(name string) string {
+ return strings.ToUpper(name[:1]) + name[1:]
+}
+
+var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{
+ "symbolName": func(name string) string {
+ return "Log" + bitName(name)
+ },
+ "bitName": bitName,
+}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT.
+
+package aws
+
+// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where
+// each bit is a flag that describes the logging behavior for one or more client components.
+// The entire 64-bit group is reserved for later expansion by the SDK.
+//
+// Example: Setting ClientLogMode to enable logging of retries and requests
+// clientLogMode := aws.LogRetries | aws.LogRequest
+//
+// Example: Adding an additional log mode to an existing ClientLogMode value
+// clientLogMode |= aws.LogResponse
+type ClientLogMode uint64
+
+// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.
+const (
+{{- range $index, $field := .ModeBits }}
+ {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }}
+{{- end }}
+)
+{{ range $_, $field := .ModeBits }}
+// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set
+func (m ClientLogMode) Is{{- bitName $field }}() bool {
+ return m&{{- (symbolName $field) }} != 0
+}
+{{ end }}
+{{- range $_, $field := .ModeBits }}
+// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit
+func (m *ClientLogMode) Clear{{- bitName $field }}() {
+ *m &^= {{ (symbolName $field) }}
+}
+{{ end -}}
+`))
+
+func main() {
+ uniqueBitFields := make(map[string]struct{})
+
+ for _, bitName := range config.ModeBits {
+ if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok {
+ panic(fmt.Sprintf("duplicate bit field: %s", bitName))
+ }
+ uniqueBitFields[strings.ToLower(bitName)] = struct{}{}
+ }
+
+ file, err := os.Create("logging.go")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer file.Close()
+
+ err = tmpl.Execute(file, config)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
new file mode 100644
index 000000000..d66f0960a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
@@ -0,0 +1,213 @@
+package middleware
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// RegisterServiceMetadata registers metadata about the service and operation into the middleware context
+// so that it is available at runtime for other middleware to introspect.
+type RegisterServiceMetadata struct {
+ ServiceID string
+ SigningName string
+ Region string
+ OperationName string
+}
+
+// ID returns the middleware identifier.
+func (s *RegisterServiceMetadata) ID() string {
+ return "RegisterServiceMetadata"
+}
+
+// HandleInitialize registers service metadata information into the middleware context, allowing for introspection.
+func (s RegisterServiceMetadata) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) {
+ if len(s.ServiceID) > 0 {
+ ctx = SetServiceID(ctx, s.ServiceID)
+ }
+ if len(s.SigningName) > 0 {
+ ctx = SetSigningName(ctx, s.SigningName)
+ }
+ if len(s.Region) > 0 {
+ ctx = setRegion(ctx, s.Region)
+ }
+ if len(s.OperationName) > 0 {
+ ctx = setOperationName(ctx, s.OperationName)
+ }
+ return next.HandleInitialize(ctx, in)
+}
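+
+// As an illustrative sketch, middleware running later in the stack can read
+// these values back, e.g.:
+//
+// serviceID := GetServiceID(ctx) // e.g. "S3"
+// region := GetRegion(ctx) // e.g. "us-east-1"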
+
+// service metadata keys for storage and lookup of runtime stack information.
+type (
+ serviceIDKey struct{}
+ signingNameKey struct{}
+ signingRegionKey struct{}
+ regionKey struct{}
+ operationNameKey struct{}
+ partitionIDKey struct{}
+ requiresLegacyEndpointsKey struct{}
+)
+
+// GetServiceID retrieves the service id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetServiceID(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string)
+ return v
+}
+
+// GetSigningName retrieves the service signing name from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. The resolved signing name is available
+// in the signer properties object passed to the signer.
+func GetSigningName(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
+ return v
+}
+
+// GetSigningRegion retrieves the region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. The resolved signing region is available
+// in the signer properties object passed to the signer.
+func GetSigningRegion(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
+ return v
+}
+
+// GetRegion retrieves the endpoint region from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRegion(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, regionKey{}).(string)
+ return v
+}
+
+// GetOperationName retrieves the service operation metadata from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetOperationName(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string)
+ return v
+}
+
+// GetPartitionID retrieves the endpoint partition id from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPartitionID(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string)
+ return v
+}
+
+// GetRequiresLegacyEndpoints returns the flag used to indicate if legacy endpoint
+// customizations need to be executed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRequiresLegacyEndpoints(ctx context.Context) bool {
+ v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool)
+ return v
+}
+
+// SetRequiresLegacyEndpoints sets or modifies the flag indicating that
+// legacy endpoint customizations are needed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value)
+}
+
+// SetSigningName sets or modifies the sigv4 or sigv4a signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningName client option
+// funcs instead.
+func SetSigningName(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option
+// funcs instead.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+
+// SetPartitionID sets the partition id of a resolved region on the context
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// EndpointSource key
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns an endpoint source if set on context
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+ v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+ return v
+}
+
+// SetEndpointSource sets endpoint source on context
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+ return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
+
+type signingCredentialsKey struct{}
+
+// GetSigningCredentials returns the credentials that were used for signing if set on context.
+func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
+ v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
+ return v
+}
+
+// SetSigningCredentials sets the credentials used for signing on the context.
+func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context {
+ return middleware.WithStackValue(ctx, signingCredentialsKey{}, value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
new file mode 100644
index 000000000..6d5f0079c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
@@ -0,0 +1,168 @@
+package middleware
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyrand "github.com/aws/smithy-go/rand"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation
+// invocation.
+type ClientRequestID struct{}
+
+// ID returns the identifier for the ClientRequestID middleware.
+func (r *ClientRequestID) ID() string {
+ return "ClientRequestID"
+}
+
+// HandleBuild attaches a unique operation invocation id for the operation to the request
+func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", req)
+ }
+
+ invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID()
+ if err != nil {
+ return out, metadata, err
+ }
+
+ const invocationIDHeader = "Amz-Sdk-Invocation-Id"
+ req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID)
+
+ return next.HandleBuild(ctx, in)
+}
+
+// RecordResponseTiming records the response timing for the SDK client requests.
+type RecordResponseTiming struct{}
+
+// ID is the middleware identifier
+func (a *RecordResponseTiming) ID() string {
+ return "RecordResponseTiming"
+}
+
+// HandleDeserialize calculates response metadata and clock skew
+func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ responseAt := sdk.NowTime()
+ setResponseAt(&metadata, responseAt)
+
+ var serverTime time.Time
+
+ switch resp := out.RawResponse.(type) {
+ case *smithyhttp.Response:
+ respDateHeader := resp.Header.Get("Date")
+ if len(respDateHeader) == 0 {
+ break
+ }
+ var parseErr error
+ serverTime, parseErr = smithyhttp.ParseTime(respDateHeader)
+ if parseErr != nil {
+ logger := middleware.GetLogger(ctx)
+ logger.Logf(logging.Warn, "failed to parse response Date header value, got %v",
+ parseErr.Error())
+ break
+ }
+ setServerTime(&metadata, serverTime)
+ }
+
+ if !serverTime.IsZero() {
+ attemptSkew := serverTime.Sub(responseAt)
+ setAttemptSkew(&metadata, attemptSkew)
+ }
+
+ return out, metadata, err
+}
+
+type responseAtKey struct{}
+
+// GetResponseAt returns the time response was received at.
+func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) {
+ v, ok = metadata.Get(responseAtKey{}).(time.Time)
+ return v, ok
+}
+
+// setResponseAt sets the response time on the metadata.
+func setResponseAt(metadata *middleware.Metadata, v time.Time) {
+ metadata.Set(responseAtKey{}, v)
+}
+
+type serverTimeKey struct{}
+
+// GetServerTime returns the server time for response.
+func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) {
+ v, ok = metadata.Get(serverTimeKey{}).(time.Time)
+ return v, ok
+}
+
+// setServerTime sets the server time on the metadata.
+func setServerTime(metadata *middleware.Metadata, v time.Time) {
+ metadata.Set(serverTimeKey{}, v)
+}
+
+type attemptSkewKey struct{}
+
+// GetAttemptSkew returns Attempt clock skew for response from metadata.
+func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) {
+ v, ok = metadata.Get(attemptSkewKey{}).(time.Duration)
+ return v, ok
+}
+
+// setAttemptSkew sets the attempt clock skew on the metadata.
+func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
+ metadata.Set(attemptSkewKey{}, v)
+}
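+
+// Illustrative sketch: a caller can approximate the server's clock by adding
+// the recorded skew to local time, e.g.:
+//
+// if skew, ok := GetAttemptSkew(metadata); ok {
+//  serverNow := time.Now().Add(skew)
+//  _ = serverNow
+// }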
+
+// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack
+func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
+ return stack.Build.Add(&ClientRequestID{}, middleware.After)
+}
+
+// AddRecordResponseTiming adds RecordResponseTiming middleware to the
+// middleware stack.
+func AddRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
+}
+
+// rawResponseKey is the accessor key used to store and access the
+// raw response within the response metadata.
+type rawResponseKey struct{}
+
+// AddRawResponse middleware adds raw response on to the metadata
+type AddRawResponse struct{}
+
+// ID returns the identifier for the AddRawResponse middleware.
+func (m *AddRawResponse) ID() string {
+ return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds the raw response to the middleware metadata.
+func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ metadata.Set(rawResponseKey{}, out.RawResponse)
+ return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// stores the raw response onto the metadata.
+func AddRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before)
+}
+
+// GetRawResponse returns raw response set on metadata
+func GetRawResponse(metadata middleware.Metadata) interface{} {
+ return metadata.Get(rawResponseKey{})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
new file mode 100644
index 000000000..ba262dadc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
@@ -0,0 +1,24 @@
+//go:build go1.16
+// +build go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+ switch runtime.GOOS {
+ case "android":
+ os = "android"
+ case "linux":
+ os = "linux"
+ case "windows":
+ os = "windows"
+ case "darwin":
+ os = "macos"
+ case "ios":
+ os = "ios"
+ default:
+ os = "other"
+ }
+ return os
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
new file mode 100644
index 000000000..e14a1e4ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
@@ -0,0 +1,24 @@
+//go:build !go1.16
+// +build !go1.16
+
+package middleware
+
+import "runtime"
+
+func getNormalizedOSName() (os string) {
+ switch runtime.GOOS {
+ case "android":
+ os = "android"
+ case "linux":
+ os = "linux"
+ case "windows":
+ os = "windows"
+ case "darwin":
+ // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64
+ // For now declare this as "other" until we have a better detection mechanism.
+ fallthrough
+ default:
+ os = "other"
+ }
+ return os
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
new file mode 100644
index 000000000..3f6aaf231
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
@@ -0,0 +1,94 @@
+package middleware
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME"
+const envAmznTraceID = "_X_AMZN_TRACE_ID"
+const amznTraceIDHeader = "X-Amzn-Trace-Id"
+
+// AddRecursionDetection adds recursionDetection to the middleware stack
+func AddRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&RecursionDetection{}, middleware.After)
+}
+
+// RecursionDetection detects the Lambda environment and adds its X-Ray trace ID
+// to the request header if absent, to avoid recursive invocation in Lambda.
+type RecursionDetection struct{}
+
+// ID returns the middleware identifier
+func (m *RecursionDetection) ID() string {
+ return "RecursionDetection"
+}
+
+// HandleBuild detects the Lambda environment and adds its trace ID to the request header if absent.
+func (m *RecursionDetection) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName)
+ xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID)
+ value := req.Header.Get(amznTraceIDHeader)
+ // only set the X-Amzn-Trace-Id header when it is not set initially, the
+ // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists
+ if value != "" || !hasLambdaEnv || !hasTraceID {
+ return next.HandleBuild(ctx, in)
+ }
+
+ req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID))
+ return next.HandleBuild(ctx, in)
+}
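+
+// Illustrative sketch: inside Lambda, with AWS_LAMBDA_FUNCTION_NAME set and
+// _X_AMZN_TRACE_ID="Root=1-abc;Parent=def", an outgoing request that lacks an
+// X-Amzn-Trace-Id header gets one set to the percent-encoded trace ID.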
+
+func percentEncode(s string) string {
+ upperhex := "0123456789ABCDEF"
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEncode(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ required := len(s) + 2*hexCount
+ t := make([]byte, required)
+ j := 0
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; shouldEncode(c) {
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+ return string(t)
+}
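+
+// For example (illustrative), characters outside the allowed set are encoded
+// as %XX while common trace-ID punctuation passes through unchanged:
+//
+// percentEncode("Root=1-abc;Parent=def") // "Root=1-abc;Parent=def"
+// percentEncode("key=a b%c") // "key=a%20b%25c"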
+
+func shouldEncode(c byte) bool {
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',':
+ return false
+ default:
+ return true
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
new file mode 100644
index 000000000..dd3391fe4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
@@ -0,0 +1,27 @@
+package middleware
+
+import (
+ "github.com/aws/smithy-go/middleware"
+)
+
+// requestIDKey is used to retrieve request id from response metadata
+type requestIDKey struct{}
+
+// SetRequestIDMetadata sets the provided request id over middleware metadata
+func SetRequestIDMetadata(metadata *middleware.Metadata, id string) {
+ metadata.Set(requestIDKey{}, id)
+}
+
+// GetRequestIDMetadata retrieves the request id from middleware metadata.
+// It returns the request id and a bool indicating whether the request id was set.
+func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) {
+ if !metadata.Has(requestIDKey{}) {
+ return "", false
+ }
+
+ v, ok := metadata.Get(requestIDKey{}).(string)
+ if !ok {
+ return "", true
+ }
+ return v, true
+}
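+
+// Illustrative usage against an operation's response metadata:
+//
+// if reqID, ok := GetRequestIDMetadata(metadata); ok {
+//  log.Println("request id:", reqID)
+// }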
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
new file mode 100644
index 000000000..128b60a73
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
@@ -0,0 +1,57 @@
+package middleware
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddRequestIDRetrieverMiddleware adds request id retriever middleware
+func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ // insert the request id retriever before the operation deserializers so that
+ // it can pull the request id from the raw response headers
+ return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+// RequestIDRetriever middleware captures the AWS service request ID from the
+// raw response.
+type RequestIDRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *RequestIDRetriever) ID() string {
+ return "RequestIDRetriever"
+}
+
+// HandleDeserialize pulls the AWS request ID from the response, storing it in
+// operation metadata.
+func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // Different headers which can map to the request id
+ requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"}
+
+ for _, h := range requestIDHeaderList {
+ // check for headers known to contain Request id
+ if v := resp.Header.Get(h); len(v) != 0 {
+ // set reqID on metadata for successful responses.
+ SetRequestIDMetadata(&metadata, v)
+
+ span, _ := tracing.GetSpan(ctx)
+ span.SetProperty("aws.request_id", v)
+ break
+ }
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
new file mode 100644
index 000000000..3314230fd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -0,0 +1,393 @@
+package middleware
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+var languageVersion = strings.TrimPrefix(runtime.Version(), "go")
+
+// SDKAgentKeyType is the metadata type to add to the SDK agent string
+type SDKAgentKeyType int
+
+// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will
+// be mapped to AdditionalMetadata.
+const (
+ _ SDKAgentKeyType = iota
+ APIMetadata
+ OperatingSystemMetadata
+ LanguageMetadata
+ EnvironmentMetadata
+ FeatureMetadata
+ ConfigMetadata
+ FrameworkMetadata
+ AdditionalMetadata
+ ApplicationIdentifier
+ FeatureMetadata2
+)
+
+// Hardcoded value to specify which version of the user agent we're using
+const uaMetadata = "ua/2.1"
+
+func (k SDKAgentKeyType) string() string {
+ switch k {
+ case APIMetadata:
+ return "api"
+ case OperatingSystemMetadata:
+ return "os"
+ case LanguageMetadata:
+ return "lang"
+ case EnvironmentMetadata:
+ return "exec-env"
+ case FeatureMetadata:
+ return "ft"
+ case ConfigMetadata:
+ return "cfg"
+ case FrameworkMetadata:
+ return "lib"
+ case ApplicationIdentifier:
+ return "app"
+ case FeatureMetadata2:
+ return "m"
+ case AdditionalMetadata:
+ fallthrough
+ default:
+ return "md"
+ }
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+
+var validChars = map[rune]bool{
+ '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true,
+ '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true,
+}
+
+// UserAgentFeature enumerates tracked SDK features.
+type UserAgentFeature string
+
+// Enumerates UserAgentFeature.
+const (
+ UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types)
+
+ UserAgentFeatureWaiter = "B"
+ UserAgentFeaturePaginator = "C"
+
+ UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard)
+ UserAgentFeatureRetryModeStandard = "E"
+ UserAgentFeatureRetryModeAdaptive = "F"
+
+ UserAgentFeatureS3Transfer = "G"
+ UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external)
+ UserAgentFeatureS3CryptoV2 = "I" // n/a
+ UserAgentFeatureS3ExpressBucket = "J"
+ UserAgentFeatureS3AccessGrants = "K" // not yet implemented
+
+ UserAgentFeatureGZIPRequestCompression = "L"
+
+ UserAgentFeatureProtocolRPCV2CBOR = "M"
+
+ UserAgentFeatureAccountIDEndpoint = "O" // DO NOT IMPLEMENT: rules output is not currently defined. SDKs should not parse endpoints for feature information.
+ UserAgentFeatureAccountIDModePreferred = "P"
+ UserAgentFeatureAccountIDModeDisabled = "Q"
+ UserAgentFeatureAccountIDModeRequired = "R"
+
+ UserAgentFeatureRequestChecksumCRC32 = "U"
+ UserAgentFeatureRequestChecksumCRC32C = "V"
+ UserAgentFeatureRequestChecksumCRC64 = "W"
+ UserAgentFeatureRequestChecksumSHA1 = "X"
+ UserAgentFeatureRequestChecksumSHA256 = "Y"
+ UserAgentFeatureRequestChecksumWhenSupported = "Z"
+ UserAgentFeatureRequestChecksumWhenRequired = "a"
+ UserAgentFeatureResponseChecksumWhenSupported = "b"
+ UserAgentFeatureResponseChecksumWhenRequired = "c"
+
+ UserAgentFeatureDynamoDBUserAgent = "d" // not yet implemented
+
+ UserAgentFeatureCredentialsCode = "e"
+ UserAgentFeatureCredentialsJvmSystemProperties = "f" // n/a (this is not a JVM sdk)
+ UserAgentFeatureCredentialsEnvVars = "g"
+ UserAgentFeatureCredentialsEnvVarsStsWebIDToken = "h"
+ UserAgentFeatureCredentialsStsAssumeRole = "i"
+ UserAgentFeatureCredentialsStsAssumeRoleSaml = "j" // not yet implemented
+ UserAgentFeatureCredentialsStsAssumeRoleWebID = "k"
+ UserAgentFeatureCredentialsStsFederationToken = "l" // not yet implemented
+ UserAgentFeatureCredentialsStsSessionToken = "m" // not yet implemented
+ UserAgentFeatureCredentialsProfile = "n"
+ UserAgentFeatureCredentialsProfileSourceProfile = "o"
+ UserAgentFeatureCredentialsProfileNamedProvider = "p"
+ UserAgentFeatureCredentialsProfileStsWebIDToken = "q"
+ UserAgentFeatureCredentialsProfileSso = "r"
+ UserAgentFeatureCredentialsSso = "s"
+ UserAgentFeatureCredentialsProfileSsoLegacy = "t"
+ UserAgentFeatureCredentialsSsoLegacy = "u"
+ UserAgentFeatureCredentialsProfileProcess = "v"
+ UserAgentFeatureCredentialsProcess = "w"
+ UserAgentFeatureCredentialsBoto2ConfigFile = "x" // n/a (this is not boto/Python)
+ UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk)
+ UserAgentFeatureCredentialsHTTP = "z"
+ UserAgentFeatureCredentialsIMDS = "0"
+
+ UserAgentFeatureBearerServiceEnvVars = "3"
+)
+
+var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{
+ aws.CredentialSourceCode: UserAgentFeatureCredentialsCode,
+ aws.CredentialSourceEnvVars: UserAgentFeatureCredentialsEnvVars,
+ aws.CredentialSourceEnvVarsSTSWebIDToken: UserAgentFeatureCredentialsEnvVarsStsWebIDToken,
+ aws.CredentialSourceSTSAssumeRole: UserAgentFeatureCredentialsStsAssumeRole,
+ aws.CredentialSourceSTSAssumeRoleSaml: UserAgentFeatureCredentialsStsAssumeRoleSaml,
+ aws.CredentialSourceSTSAssumeRoleWebID: UserAgentFeatureCredentialsStsAssumeRoleWebID,
+ aws.CredentialSourceSTSFederationToken: UserAgentFeatureCredentialsStsFederationToken,
+ aws.CredentialSourceSTSSessionToken: UserAgentFeatureCredentialsStsSessionToken,
+ aws.CredentialSourceProfile: UserAgentFeatureCredentialsProfile,
+ aws.CredentialSourceProfileSourceProfile: UserAgentFeatureCredentialsProfileSourceProfile,
+ aws.CredentialSourceProfileNamedProvider: UserAgentFeatureCredentialsProfileNamedProvider,
+ aws.CredentialSourceProfileSTSWebIDToken: UserAgentFeatureCredentialsProfileStsWebIDToken,
+ aws.CredentialSourceProfileSSO: UserAgentFeatureCredentialsProfileSso,
+ aws.CredentialSourceSSO: UserAgentFeatureCredentialsSso,
+ aws.CredentialSourceProfileSSOLegacy: UserAgentFeatureCredentialsProfileSsoLegacy,
+ aws.CredentialSourceSSOLegacy: UserAgentFeatureCredentialsSsoLegacy,
+ aws.CredentialSourceProfileProcess: UserAgentFeatureCredentialsProfileProcess,
+ aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess,
+ aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP,
+ aws.CredentialSourceIMDS: UserAgentFeatureCredentialsIMDS,
+}
+
+// RequestUserAgent is a build middleware that sets the User-Agent for the request.
+type RequestUserAgent struct {
+ sdkAgent, userAgent *smithyhttp.UserAgentBuilder
+ features map[UserAgentFeature]struct{}
+}
+
+// NewRequestUserAgent returns a new RequestUserAgent which will set the
+// User-Agent and X-Amz-User-Agent headers for the request.
+//
+// User-Agent example:
+//
+// aws-sdk-go-v2/1.2.3
+//
+// X-Amz-User-Agent example:
+//
+// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
+func NewRequestUserAgent() *RequestUserAgent {
+ userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
+ addProductName(userAgent)
+ addUserAgentMetadata(userAgent)
+ addProductName(sdkAgent)
+
+ r := &RequestUserAgent{
+ sdkAgent: sdkAgent,
+ userAgent: userAgent,
+ features: map[UserAgentFeature]struct{}{},
+ }
+
+ addSDKMetadata(r)
+
+ return r
+}
+
+func addSDKMetadata(r *RequestUserAgent) {
+ r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
+ r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
+ r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
+ r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH)
+ if ev := os.Getenv(execEnvVar); len(ev) > 0 {
+ r.AddSDKAgentKey(EnvironmentMetadata, ev)
+ }
+}
+
+func addProductName(builder *smithyhttp.UserAgentBuilder) {
+ builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
+}
+
+func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) {
+ builder.AddKey(uaMetadata)
+}
+
+// AddUserAgentKey retrieves a RequestUserAgent from the provided stack, or initializes one, and adds the key to the User-Agent string.
+func AddUserAgentKey(key string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddUserAgentKey(key)
+ return nil
+ }
+}
+
+// AddUserAgentKeyValue retrieves a RequestUserAgent from the provided stack, or initializes one, and adds the key/value pair to the User-Agent string.
+func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddUserAgentKeyValue(key, value)
+ return nil
+ }
+}
+
+// AddSDKAgentKey retrieves a RequestUserAgent from the provided stack, or initializes one, and adds the typed key to the SDK agent string.
+func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddSDKAgentKey(keyType, key)
+ return nil
+ }
+}
+
+// AddSDKAgentKeyValue retrieves a RequestUserAgent from the provided stack, or initializes one, and adds the typed key/value pair to the SDK agent string.
+func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ requestUserAgent, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+ requestUserAgent.AddSDKAgentKeyValue(keyType, key, value)
+ return nil
+ }
+}
+
+// AddRequestUserAgentMiddleware registers a RequestUserAgent middleware on the stack if not present.
+func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
+ _, err := getOrAddRequestUserAgent(stack)
+ return err
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) {
+ id := (*RequestUserAgent)(nil).ID()
+ bm, ok := stack.Build.Get(id)
+ if !ok {
+ bm = NewRequestUserAgent()
+ err := stack.Build.Add(bm, middleware.After)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ requestUserAgent, ok := bm.(*RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
+ }
+
+ return requestUserAgent, nil
+}
+
+// AddUserAgentKey adds the component identified by key to the User-Agent string.
+func (u *RequestUserAgent) AddUserAgentKey(key string) {
+ u.userAgent.AddKey(strings.Map(rules, key))
+}
+
+// AddUserAgentKeyValue adds the given key and value pair to the User-Agent string.
+func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) {
+ u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
+}
+
+// AddUserAgentFeature adds the feature ID to the tracking list to be emitted
+// in the final User-Agent string.
+func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) {
+ u.features[feature] = struct{}{}
+}
+
+// AddSDKAgentKey adds the component identified by key, prefixed with its key type, to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+ // TODO: should target sdkAgent
+ u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
+}
+
+// AddSDKAgentKeyValue adds the given key and value pair, prefixed with the key type, to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+ // TODO: should target sdkAgent
+ u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
+}
+
+// AddCredentialsSource adds the credential source as a feature on the User-Agent string
+func (u *RequestUserAgent) AddCredentialsSource(source aws.CredentialSource) {
+ x, ok := credentialSourceToFeature[source]
+ if ok {
+ u.AddUserAgentFeature(x)
+ }
+}
+
+// ID returns the name of the middleware.
+func (u *RequestUserAgent) ID() string {
+ return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ switch req := in.Request.(type) {
+ case *smithyhttp.Request:
+ u.addHTTPUserAgent(req)
+ // TODO: To be re-enabled
+ // u.addHTTPSDKAgent(req)
+ default:
+ return out, metadata, fmt.Errorf("unknown transport type %T", in)
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+ const userAgent = "User-Agent"
+ if len(u.features) > 0 {
+ updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features))
+ }
+ updateHTTPHeader(request, userAgent, u.userAgent.Build())
+}
+
+func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+ const sdkAgent = "X-Amz-User-Agent"
+ updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
+}
+
+func updateHTTPHeader(request *smithyhttp.Request, header string, value string) {
+ var current string
+ if v := request.Header[header]; len(v) > 0 {
+ current = v[0]
+ }
+ if len(current) > 0 {
+ current = value + " " + current
+ } else {
+ current = value
+ }
+ request.Header[header] = append(request.Header[header][:0], current)
+}
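+
+// For example (illustrative), with an existing User-Agent of
+// "aws-sdk-go-v2/1.39.6" and value "m/E", the header becomes
+// "m/E aws-sdk-go-v2/1.39.6"; new values are prepended.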
+
+func rules(r rune) rune {
+ switch {
+ case r >= '0' && r <= '9':
+ return r
+ case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z':
+ return r
+ case validChars[r]:
+ return r
+ default:
+ return '-'
+ }
+}
+
+func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string {
+ fs := make([]string, 0, len(features))
+ for f := range features {
+ fs = append(fs, string(f))
+ }
+
+ sort.Strings(fs)
+ return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ","))
+}
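+
+// Illustrative sketch: with {UserAgentFeatureWaiter, UserAgentFeatureRetryModeStandard}
+// tracked, buildFeatureMetrics returns "m/B,E" (feature IDs sorted and
+// comma-joined under the "m" key).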
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
new file mode 100644
index 000000000..ddb162b36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
@@ -0,0 +1,142 @@
+# v1.6.10 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+
+# v1.6.9 (2025-02-14)
+
+* **Bug Fix**: Remove max limit on event stream messages
+
+# v1.6.8 (2025-01-24)
+
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.6.7 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+
+# v1.6.6 (2024-10-04)
+
+* No change notes available for this release.
+
+# v1.6.5 (2024-09-20)
+
+* No change notes available for this release.
+
+# v1.6.4 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+
+# v1.6.3 (2024-06-28)
+
+* No change notes available for this release.
+
+# v1.6.2 (2024-03-29)
+
+* No change notes available for this release.
+
+# v1.6.1 (2024-02-21)
+
+* No change notes available for this release.
+
+# v1.6.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.5.4 (2023-12-07)
+
+* No change notes available for this release.
+
+# v1.5.3 (2023-11-30)
+
+* No change notes available for this release.
+
+# v1.5.2 (2023-11-29)
+
+* No change notes available for this release.
+
+# v1.5.1 (2023-11-15)
+
+* No change notes available for this release.
+
+# v1.5.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+
+# v1.4.14 (2023-10-06)
+
+* No change notes available for this release.
+
+# v1.4.13 (2023-08-18)
+
+* No change notes available for this release.
+
+# v1.4.12 (2023-08-07)
+
+* No change notes available for this release.
+
+# v1.4.11 (2023-07-31)
+
+* No change notes available for this release.
+
+# v1.4.10 (2022-12-02)
+
+* No change notes available for this release.
+
+# v1.4.9 (2022-10-24)
+
+* No change notes available for this release.
+
+# v1.4.8 (2022-09-14)
+
+* No change notes available for this release.
+
+# v1.4.7 (2022-09-02)
+
+* No change notes available for this release.
+
+# v1.4.6 (2022-08-31)
+
+* No change notes available for this release.
+
+# v1.4.5 (2022-08-29)
+
+* No change notes available for this release.
+
+# v1.4.4 (2022-08-09)
+
+* No change notes available for this release.
+
+# v1.4.3 (2022-06-29)
+
+* No change notes available for this release.
+
+# v1.4.2 (2022-06-07)
+
+* No change notes available for this release.
+
+# v1.4.1 (2022-03-24)
+
+* No change notes available for this release.
+
+# v1.4.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.3.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.1.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.0.0 (2021-11-06)
+
+* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release.
+* **Release**: Protocol support has been added for AWS event stream.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go
new file mode 100644
index 000000000..151054971
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/debug.go
@@ -0,0 +1,144 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+type decodedMessage struct {
+ rawMessage
+ Headers decodedHeaders `json:"headers"`
+}
+type jsonMessage struct {
+ Length json.Number `json:"total_length"`
+ HeadersLen json.Number `json:"headers_length"`
+ PreludeCRC json.Number `json:"prelude_crc"`
+ Headers decodedHeaders `json:"headers"`
+ Payload []byte `json:"payload"`
+ CRC json.Number `json:"message_crc"`
+}
+
+func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) {
+ var jsonMsg jsonMessage
+ if err = json.Unmarshal(b, &jsonMsg); err != nil {
+ return err
+ }
+
+ d.Length, err = numAsUint32(jsonMsg.Length)
+ if err != nil {
+ return err
+ }
+ d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen)
+ if err != nil {
+ return err
+ }
+ d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC)
+ if err != nil {
+ return err
+ }
+ d.Headers = jsonMsg.Headers
+ d.Payload = jsonMsg.Payload
+ d.CRC, err = numAsUint32(jsonMsg.CRC)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d *decodedMessage) MarshalJSON() ([]byte, error) {
+ jsonMsg := jsonMessage{
+ Length: json.Number(strconv.Itoa(int(d.Length))),
+ HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))),
+ PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))),
+ Headers: d.Headers,
+ Payload: d.Payload,
+ CRC: json.Number(strconv.Itoa(int(d.CRC))),
+ }
+
+ return json.Marshal(jsonMsg)
+}
+
+func numAsUint32(n json.Number) (uint32, error) {
+ v, err := n.Int64()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get int64 json number, %v", err)
+ }
+
+ return uint32(v), nil
+}
+
+func (d decodedMessage) Message() Message {
+ return Message{
+ Headers: Headers(d.Headers),
+ Payload: d.Payload,
+ }
+}
+
+type decodedHeaders Headers
+
+func (hs *decodedHeaders) UnmarshalJSON(b []byte) error {
+ var jsonHeaders []struct {
+ Name string `json:"name"`
+ Type valueType `json:"type"`
+ Value interface{} `json:"value"`
+ }
+
+ decoder := json.NewDecoder(bytes.NewReader(b))
+ decoder.UseNumber()
+ if err := decoder.Decode(&jsonHeaders); err != nil {
+ return err
+ }
+
+ var headers Headers
+ for _, h := range jsonHeaders {
+ value, err := valueFromType(h.Type, h.Value)
+ if err != nil {
+ return err
+ }
+ headers.Set(h.Name, value)
+ }
+ *hs = decodedHeaders(headers)
+
+ return nil
+}
+
+func valueFromType(typ valueType, val interface{}) (Value, error) {
+ switch typ {
+ case trueValueType:
+ return BoolValue(true), nil
+ case falseValueType:
+ return BoolValue(false), nil
+ case int8ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int8Value(int8(v)), err
+ case int16ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int16Value(int16(v)), err
+ case int32ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int32Value(int32(v)), err
+ case int64ValueType:
+ v, err := val.(json.Number).Int64()
+ return Int64Value(v), err
+ case bytesValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ return BytesValue(v), err
+ case stringValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ return StringValue(string(v)), err
+ case timestampValueType:
+ v, err := val.(json.Number).Int64()
+ return TimestampValue(timeFromEpochMilli(v)), err
+ case uuidValueType:
+ v, err := base64.StdEncoding.DecodeString(val.(string))
+ var tv UUIDValue
+ copy(tv[:], v)
+ return tv, err
+ default:
+ panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go
new file mode 100644
index 000000000..d9ab7652f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/decode.go
@@ -0,0 +1,218 @@
+package eventstream
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+
+	"github.com/aws/smithy-go/logging"
+)
+
+// DecoderOptions holds the Decoder configuration options.
+type DecoderOptions struct {
+ Logger logging.Logger
+ LogMessages bool
+}
+
+// Decoder provides decoding of Event Stream messages.
+type Decoder struct {
+ options DecoderOptions
+}
+
+// NewDecoder initializes and returns a Decoder for decoding event
+// stream messages from the reader passed to Decode.
+func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder {
+ options := DecoderOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &Decoder{
+ options: options,
+ }
+}
+
+// Decode attempts to decode a single message from the event stream reader.
+// It returns the event stream message, or an error if decodeMessage fails to
+// read the message from the stream.
+//
+// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers
+// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying
+// payloadBuf byte slice.
+func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
+ if d.options.Logger != nil && d.options.LogMessages {
+ debugMsgBuf := bytes.NewBuffer(nil)
+ reader = io.TeeReader(reader, debugMsgBuf)
+ defer func() {
+ logMessageDecode(d.options.Logger, debugMsgBuf, m, err)
+ }()
+ }
+
+ m, err = decodeMessage(reader, payloadBuf)
+
+ return m, err
+}
+
+// decodeMessage attempts to decode a single message from the event stream reader.
+// It returns the event stream message, or an error if decodeMessage fails to
+// read the message from the reader.
+func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) {
+ crc := crc32.New(crc32IEEETable)
+ hashReader := io.TeeReader(reader, crc)
+
+ prelude, err := decodePrelude(hashReader, crc)
+ if err != nil {
+ return Message{}, err
+ }
+
+ if prelude.HeadersLen > 0 {
+ lr := io.LimitReader(hashReader, int64(prelude.HeadersLen))
+ m.Headers, err = decodeHeaders(lr)
+ if err != nil {
+ return Message{}, err
+ }
+ }
+
+ if payloadLen := prelude.PayloadLen(); payloadLen > 0 {
+ buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen)))
+ if err != nil {
+ return Message{}, err
+ }
+ m.Payload = buf
+ }
+
+ msgCRC := crc.Sum32()
+ if err := validateCRC(reader, msgCRC); err != nil {
+ return Message{}, err
+ }
+
+ return m, nil
+}
+
+func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
+ w := bytes.NewBuffer(nil)
+ defer func() { logger.Logf(logging.Debug, w.String()) }()
+
+ fmt.Fprintf(w, "Raw message:\n%s\n",
+ hex.Dump(msgBuf.Bytes()))
+
+ if decodeErr != nil {
+ fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr)
+ return
+ }
+
+ rawMsg, err := msg.rawMessage()
+ if err != nil {
+ fmt.Fprintf(w, "failed to create raw message, %v\n", err)
+ return
+ }
+
+ decodedMsg := decodedMessage{
+ rawMessage: rawMsg,
+ Headers: decodedHeaders(msg.Headers),
+ }
+
+ fmt.Fprintf(w, "Decoded message:\n")
+ encoder := json.NewEncoder(w)
+ if err := encoder.Encode(decodedMsg); err != nil {
+ fmt.Fprintf(w, "failed to generate decoded message, %v\n", err)
+ }
+}
+
+func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) {
+ var p messagePrelude
+
+ var err error
+ p.Length, err = decodeUint32(r)
+ if err != nil {
+ return messagePrelude{}, err
+ }
+
+ p.HeadersLen, err = decodeUint32(r)
+ if err != nil {
+ return messagePrelude{}, err
+ }
+
+ if err := p.ValidateLens(); err != nil {
+ return messagePrelude{}, err
+ }
+
+ preludeCRC := crc.Sum32()
+ if err := validateCRC(r, preludeCRC); err != nil {
+ return messagePrelude{}, err
+ }
+
+ p.PreludeCRC = preludeCRC
+
+ return p, nil
+}
+
+func decodePayload(buf []byte, r io.Reader) ([]byte, error) {
+ w := bytes.NewBuffer(buf[0:0])
+
+ _, err := io.Copy(w, r)
+ return w.Bytes(), err
+}
+
+func decodeUint8(r io.Reader) (uint8, error) {
+ type byteReader interface {
+ ReadByte() (byte, error)
+ }
+
+ if br, ok := r.(byteReader); ok {
+ v, err := br.ReadByte()
+ return v, err
+ }
+
+ var b [1]byte
+ _, err := io.ReadFull(r, b[:])
+ return b[0], err
+}
+
+func decodeUint16(r io.Reader) (uint16, error) {
+ var b [2]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint16(bs), nil
+}
+
+func decodeUint32(r io.Reader) (uint32, error) {
+ var b [4]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint32(bs), nil
+}
+
+func decodeUint64(r io.Reader) (uint64, error) {
+ var b [8]byte
+ bs := b[:]
+ _, err := io.ReadFull(r, bs)
+ if err != nil {
+ return 0, err
+ }
+ return binary.BigEndian.Uint64(bs), nil
+}
+
+func validateCRC(r io.Reader, expect uint32) error {
+ msgCRC, err := decodeUint32(r)
+ if err != nil {
+ return err
+ }
+
+ if msgCRC != expect {
+ return ChecksumError{}
+ }
+
+ return nil
+}
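A minimal usage sketch for the decoder above (illustrative only, not part of the vendored upstream file): reading frames in a loop until the reader is exhausted. The drain helper and its handling logic are assumptions made for the example.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
)

// drain reads messages until the stream is exhausted, reusing one payload
// buffer; Decode documents that the previous Message.Payload must be
// consumed before the same buffer is passed back in.
func drain(r io.Reader) error {
	decoder := eventstream.NewDecoder()
	var payloadBuf []byte
	for {
		msg, err := decoder.Decode(r, payloadBuf)
		if err == io.EOF {
			return nil // no more frames
		}
		if err != nil {
			return err
		}
		fmt.Printf("event %v, %d byte payload\n",
			msg.Headers.Get(":event-type"), len(msg.Payload))
		payloadBuf = msg.Payload[:0]
	}
}

func main() {
	_ = drain(bytes.NewReader(nil)) // empty stream returns immediately
}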
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go
new file mode 100644
index 000000000..f03ee4b93
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/encode.go
@@ -0,0 +1,167 @@
+package eventstream
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+
+	"github.com/aws/smithy-go/logging"
+)
+
+// EncoderOptions holds the configuration options for the Encoder.
+type EncoderOptions struct {
+ Logger logging.Logger
+ LogMessages bool
+}
+
+// Encoder provides EventStream message encoding.
+type Encoder struct {
+ options EncoderOptions
+
+ headersBuf *bytes.Buffer
+ messageBuf *bytes.Buffer
+}
+
+// NewEncoder initializes and returns an Encoder to encode Event Stream
+// messages.
+func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder {
+ o := EncoderOptions{}
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &Encoder{
+ options: o,
+ headersBuf: bytes.NewBuffer(nil),
+ messageBuf: bytes.NewBuffer(nil),
+ }
+}
+
+// Encode encodes a single EventStream message to the io.Writer the Encoder
+// was created with. An error is returned if writing the message fails.
+func (e *Encoder) Encode(w io.Writer, msg Message) (err error) {
+ e.headersBuf.Reset()
+ e.messageBuf.Reset()
+
+ var writer io.Writer = e.messageBuf
+ if e.options.Logger != nil && e.options.LogMessages {
+ encodeMsgBuf := bytes.NewBuffer(nil)
+ writer = io.MultiWriter(writer, encodeMsgBuf)
+ defer func() {
+ logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err)
+ }()
+ }
+
+ if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil {
+ return err
+ }
+
+ crc := crc32.New(crc32IEEETable)
+ hashWriter := io.MultiWriter(writer, crc)
+
+ headersLen := uint32(e.headersBuf.Len())
+ payloadLen := uint32(len(msg.Payload))
+
+ if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil {
+ return err
+ }
+
+ if headersLen > 0 {
+ if _, err = io.Copy(hashWriter, e.headersBuf); err != nil {
+ return err
+ }
+ }
+
+ if payloadLen > 0 {
+ if _, err = hashWriter.Write(msg.Payload); err != nil {
+ return err
+ }
+ }
+
+ msgCRC := crc.Sum32()
+ if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil {
+ return err
+ }
+
+ _, err = io.Copy(w, e.messageBuf)
+
+ return err
+}
+
+func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
+ w := bytes.NewBuffer(nil)
+ defer func() { logger.Logf(logging.Debug, w.String()) }()
+
+ fmt.Fprintf(w, "Message to encode:\n")
+ encoder := json.NewEncoder(w)
+ if err := encoder.Encode(msg); err != nil {
+ fmt.Fprintf(w, "Failed to get encoded message, %v\n", err)
+ }
+
+ if encodeErr != nil {
+ fmt.Fprintf(w, "Encode error: %v\n", encodeErr)
+ return
+ }
+
+ fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes()))
+}
+
+func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error {
+ p := messagePrelude{
+ Length: minMsgLen + headersLen + payloadLen,
+ HeadersLen: headersLen,
+ }
+ if err := p.ValidateLens(); err != nil {
+ return err
+ }
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ p.Length,
+ p.HeadersLen,
+ )
+ if err != nil {
+ return err
+ }
+
+ p.PreludeCRC = crc.Sum32()
+ err = binary.Write(w, binary.BigEndian, p.PreludeCRC)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// EncodeHeaders writes the header values to the writer encoded in the event
+// stream format. Returns an error if a header fails to encode.
+func EncodeHeaders(w io.Writer, headers Headers) error {
+ for _, h := range headers {
+ hn := headerName{
+ Len: uint8(len(h.Name)),
+ }
+ copy(hn.Name[:hn.Len], h.Name)
+ if err := hn.encode(w); err != nil {
+ return err
+ }
+
+ if err := h.Value.encode(w); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error {
+ for _, v := range vs {
+ if err := binary.Write(w, order, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
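An encode/decode round trip through an in-memory buffer, as an illustrative sketch (not part of the vendored source). The 16 bytes of fixed framing come from the 8-byte prelude plus the two 4-byte CRCs defined in message.go below.

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
)

func main() {
	msg := eventstream.Message{
		Headers: eventstream.Headers{
			{Name: ":message-type", Value: eventstream.StringValue("event")},
		},
		Payload: []byte(`{"ok":true}`),
	}

	var buf bytes.Buffer
	if err := eventstream.NewEncoder().Encode(&buf, msg); err != nil {
		panic(err)
	}
	// Frame size = 16 bytes of framing + encoded headers + payload.
	fmt.Println("frame bytes:", buf.Len())

	decoded, err := eventstream.NewDecoder().Decode(&buf, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("payload:", string(decoded.Payload)) // {"ok":true}
}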
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go
new file mode 100644
index 000000000..5481ef307
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/error.go
@@ -0,0 +1,23 @@
+package eventstream
+
+import "fmt"
+
+// LengthError provides the error for items being larger than a maximum length.
+type LengthError struct {
+ Part string
+ Want int
+ Have int
+ Value interface{}
+}
+
+func (e LengthError) Error() string {
+ return fmt.Sprintf("%s length invalid, %d/%d, %v",
+ e.Part, e.Want, e.Have, e.Value)
+}
+
+// ChecksumError provides the error for message checksum mismatches.
+type ChecksumError struct{}
+
+func (e ChecksumError) Error() string {
+ return "message checksum mismatch"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go
new file mode 100644
index 000000000..93ea71ffd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/headers.go
@@ -0,0 +1,24 @@
+package eventstreamapi
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+ ChunkSignatureHeader = `:chunk-signature` // chunk signature for message
+ DateHeader = `:date` // Date header for signature
+ ContentTypeHeader = ":content-type" // message payload content-type
+
+ // Message header and values
+ MessageTypeHeader = `:message-type` // Identifies type of message.
+ EventMessageType = `event`
+ ErrorMessageType = `error`
+ ExceptionMessageType = `exception`
+
+ // Message Events
+ EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+ // Message Error
+ ErrorCodeHeader = `:error-code`
+ ErrorMessageHeader = `:error-message`
+
+ // Message Exception
+ ExceptionTypeHeader = `:exception-type`
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go
new file mode 100644
index 000000000..d07ff6b89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/middleware.go
@@ -0,0 +1,71 @@
+package eventstreamapi
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type eventStreamWriterKey struct{}
+
+// GetInputStreamWriter returns the io.WriteCloser used for the operation's input event stream.
+func GetInputStreamWriter(ctx context.Context) io.WriteCloser {
+ writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser)
+ return writeCloser
+}
+
+func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context {
+ return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser)
+}
+
+// InitializeStreamWriter is a Finalize middleware that initializes an in-memory pipe for
+// sending event stream messages via the HTTP request body.
+type InitializeStreamWriter struct{}
+
+// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack.
+func AddInitializeStreamWriter(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After)
+}
+
+// ID returns the identifier for the middleware.
+func (i *InitializeStreamWriter) ID() string {
+ return "InitializeStreamWriter"
+}
+
+// HandleFinalize is the middleware implementation.
+func (i *InitializeStreamWriter) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
+ }
+
+ inputReader, inputWriter := io.Pipe()
+ defer func() {
+ if err == nil {
+ return
+ }
+ _ = inputReader.Close()
+ _ = inputWriter.Close()
+ }()
+
+ request, err = request.SetStream(inputReader)
+ if err != nil {
+ return out, metadata, err
+ }
+ in.Request = request
+
+ ctx = setInputStreamWriter(ctx, inputWriter)
+
+ out, metadata, err = next.HandleFinalize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go
new file mode 100644
index 000000000..cbf5a2862
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport.go
@@ -0,0 +1,13 @@
+//go:build go1.18
+// +build go1.18
+
+package eventstreamapi
+
+import smithyhttp "github.com/aws/smithy-go/transport/http"
+
+// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
+//
+// This operation is a no-op for Go 1.18 and above.
+func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go
new file mode 100644
index 000000000..7d10ec2eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi/transport_go117.go
@@ -0,0 +1,12 @@
+//go:build !go1.18
+// +build !go1.18
+
+package eventstreamapi
+
+import smithyhttp "github.com/aws/smithy-go/transport/http"
+
+// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
+func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
+ r.Header.Set("Expect", "100-continue")
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
new file mode 100644
index 000000000..01981f464
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package eventstream
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.6.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go
new file mode 100644
index 000000000..f6f8c5674
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header.go
@@ -0,0 +1,175 @@
+package eventstream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+// Headers are a collection of EventStream header values.
+type Headers []Header
+
+// Header is a single EventStream Key Value header pair.
+type Header struct {
+ Name string
+ Value Value
+}
+
+// Set associates the name with a value. If the header name already exists in
+// the Headers, the value will be replaced with the new one.
+func (hs *Headers) Set(name string, value Value) {
+ var i int
+ for ; i < len(*hs); i++ {
+ if (*hs)[i].Name == name {
+ (*hs)[i].Value = value
+ return
+ }
+ }
+
+ *hs = append(*hs, Header{
+ Name: name, Value: value,
+ })
+}
+
+// Get returns the Value associated with the header. Nil is returned if the
+// value does not exist.
+func (hs Headers) Get(name string) Value {
+ for i := 0; i < len(hs); i++ {
+ if h := hs[i]; h.Name == name {
+ return h.Value
+ }
+ }
+ return nil
+}
+
+// Del deletes the value in the Headers if it exists.
+func (hs *Headers) Del(name string) {
+ for i := 0; i < len(*hs); i++ {
+ if (*hs)[i].Name == name {
+ copy((*hs)[i:], (*hs)[i+1:])
+ (*hs) = (*hs)[:len(*hs)-1]
+ }
+ }
+}
+
+// Clone returns a deep copy of the headers
+func (hs Headers) Clone() Headers {
+ o := make(Headers, 0, len(hs))
+ for _, h := range hs {
+ o.Set(h.Name, h.Value)
+ }
+ return o
+}
+
+func decodeHeaders(r io.Reader) (Headers, error) {
+ hs := Headers{}
+
+ for {
+ name, err := decodeHeaderName(r)
+ if err != nil {
+ if err == io.EOF {
+ // EOF while getting header name means no more headers
+ break
+ }
+ return nil, err
+ }
+
+ value, err := decodeHeaderValue(r)
+ if err != nil {
+ return nil, err
+ }
+
+ hs.Set(name, value)
+ }
+
+ return hs, nil
+}
+
+func decodeHeaderName(r io.Reader) (string, error) {
+ var n headerName
+
+ var err error
+ n.Len, err = decodeUint8(r)
+ if err != nil {
+ return "", err
+ }
+
+ name := n.Name[:n.Len]
+ if _, err := io.ReadFull(r, name); err != nil {
+ return "", err
+ }
+
+ return string(name), nil
+}
+
+func decodeHeaderValue(r io.Reader) (Value, error) {
+ var raw rawValue
+
+ typ, err := decodeUint8(r)
+ if err != nil {
+ return nil, err
+ }
+ raw.Type = valueType(typ)
+
+ var v Value
+
+ switch raw.Type {
+ case trueValueType:
+ v = BoolValue(true)
+ case falseValueType:
+ v = BoolValue(false)
+ case int8ValueType:
+ var tv Int8Value
+ err = tv.decode(r)
+ v = tv
+ case int16ValueType:
+ var tv Int16Value
+ err = tv.decode(r)
+ v = tv
+ case int32ValueType:
+ var tv Int32Value
+ err = tv.decode(r)
+ v = tv
+ case int64ValueType:
+ var tv Int64Value
+ err = tv.decode(r)
+ v = tv
+ case bytesValueType:
+ var tv BytesValue
+ err = tv.decode(r)
+ v = tv
+ case stringValueType:
+ var tv StringValue
+ err = tv.decode(r)
+ v = tv
+ case timestampValueType:
+ var tv TimestampValue
+ err = tv.decode(r)
+ v = tv
+ case uuidValueType:
+ var tv UUIDValue
+ err = tv.decode(r)
+ v = tv
+ default:
+ panic(fmt.Sprintf("unknown value type %d", raw.Type))
+ }
+
+ // Error could be EOF, let caller deal with it
+ return v, err
+}
+
+const maxHeaderNameLen = 255
+
+type headerName struct {
+ Len uint8
+ Name [maxHeaderNameLen]byte
+}
+
+func (v headerName) encode(w io.Writer) error {
+ if err := binary.Write(w, binary.BigEndian, v.Len); err != nil {
+ return err
+ }
+
+ _, err := w.Write(v.Name[:v.Len])
+ return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go
new file mode 100644
index 000000000..423b6bb26
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/header_value.go
@@ -0,0 +1,521 @@
+package eventstream
+
+import (
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+)
+
+const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1
+
+// valueType is the EventStream header value type.
+type valueType uint8
+
+// Header value types
+const (
+ trueValueType valueType = iota
+ falseValueType
+ int8ValueType // Byte
+ int16ValueType // Short
+ int32ValueType // Integer
+ int64ValueType // Long
+ bytesValueType
+ stringValueType
+ timestampValueType
+ uuidValueType
+)
+
+func (t valueType) String() string {
+ switch t {
+ case trueValueType:
+ return "bool"
+ case falseValueType:
+ return "bool"
+ case int8ValueType:
+ return "int8"
+ case int16ValueType:
+ return "int16"
+ case int32ValueType:
+ return "int32"
+ case int64ValueType:
+ return "int64"
+ case bytesValueType:
+ return "byte_array"
+ case stringValueType:
+ return "string"
+ case timestampValueType:
+ return "timestamp"
+ case uuidValueType:
+ return "uuid"
+ default:
+ return fmt.Sprintf("unknown value type %d", uint8(t))
+ }
+}
+
+type rawValue struct {
+ Type valueType
+ Len uint16 // Only set for variable length slices
+ Value []byte // byte representation of value, BigEndian encoding.
+}
+
+func (r rawValue) encodeScalar(w io.Writer, v interface{}) error {
+ return binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ v,
+ )
+}
+
+func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error {
+	// Propagate the type-byte write error instead of silently discarding it.
+	if err := binary.Write(w, binary.BigEndian, r.Type); err != nil {
+		return err
+	}
+
+ _, err := w.Write(v)
+ return err
+}
+
+func (r rawValue) encodeBytes(w io.Writer, v []byte) error {
+ if len(v) > maxHeaderValueLen {
+ return LengthError{
+ Part: "header value",
+ Want: maxHeaderValueLen, Have: len(v),
+ Value: v,
+ }
+ }
+ r.Len = uint16(len(v))
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ r.Len,
+ )
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(v)
+ return err
+}
+
+func (r rawValue) encodeString(w io.Writer, v string) error {
+ if len(v) > maxHeaderValueLen {
+ return LengthError{
+ Part: "header value",
+ Want: maxHeaderValueLen, Have: len(v),
+ Value: v,
+ }
+ }
+ r.Len = uint16(len(v))
+
+ type stringWriter interface {
+ WriteString(string) (int, error)
+ }
+
+ err := binaryWriteFields(w, binary.BigEndian,
+ r.Type,
+ r.Len,
+ )
+ if err != nil {
+ return err
+ }
+
+ if sw, ok := w.(stringWriter); ok {
+ _, err = sw.WriteString(v)
+ } else {
+ _, err = w.Write([]byte(v))
+ }
+
+ return err
+}
+
+func decodeFixedBytesValue(r io.Reader, buf []byte) error {
+ _, err := io.ReadFull(r, buf)
+ return err
+}
+
+func decodeBytesValue(r io.Reader) ([]byte, error) {
+ var raw rawValue
+ var err error
+ raw.Len, err = decodeUint16(r)
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, raw.Len)
+ _, err = io.ReadFull(r, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf, nil
+}
+
+func decodeStringValue(r io.Reader) (string, error) {
+ v, err := decodeBytesValue(r)
+ return string(v), err
+}
+
+// Value represents the abstract header value.
+type Value interface {
+ Get() interface{}
+ String() string
+ valueType() valueType
+ encode(io.Writer) error
+}
+
+// A BoolValue provides eventstream encoding and representation
+// of a Go bool value.
+type BoolValue bool
+
+// Get returns the underlying type
+func (v BoolValue) Get() interface{} {
+ return bool(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (v BoolValue) valueType() valueType {
+ if v {
+ return trueValueType
+ }
+ return falseValueType
+}
+
+func (v BoolValue) String() string {
+ return strconv.FormatBool(bool(v))
+}
+
+// encode encodes the BoolValue into an eventstream binary value
+// representation.
+func (v BoolValue) encode(w io.Writer) error {
+ return binary.Write(w, binary.BigEndian, v.valueType())
+}
+
+// An Int8Value provides eventstream encoding, and representation of a Go
+// int8 value.
+type Int8Value int8
+
+// Get returns the underlying value.
+func (v Int8Value) Get() interface{} {
+ return int8(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int8Value) valueType() valueType {
+ return int8ValueType
+}
+
+func (v Int8Value) String() string {
+ return fmt.Sprintf("0x%02x", int8(v))
+}
+
+// encode encodes the Int8Value into an eventstream binary value
+// representation.
+func (v Int8Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int8Value) decode(r io.Reader) error {
+ n, err := decodeUint8(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int8Value(n)
+ return nil
+}
+
+// An Int16Value provides eventstream encoding, and representation of a Go
+// int16 value.
+type Int16Value int16
+
+// Get returns the underlying value.
+func (v Int16Value) Get() interface{} {
+ return int16(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int16Value) valueType() valueType {
+ return int16ValueType
+}
+
+func (v Int16Value) String() string {
+ return fmt.Sprintf("0x%04x", int16(v))
+}
+
+// encode encodes the Int16Value into an eventstream binary value
+// representation.
+func (v Int16Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int16Value) decode(r io.Reader) error {
+ n, err := decodeUint16(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int16Value(n)
+ return nil
+}
+
+// An Int32Value provides eventstream encoding, and representation of a Go
+// int32 value.
+type Int32Value int32
+
+// Get returns the underlying value.
+func (v Int32Value) Get() interface{} {
+ return int32(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int32Value) valueType() valueType {
+ return int32ValueType
+}
+
+func (v Int32Value) String() string {
+ return fmt.Sprintf("0x%08x", int32(v))
+}
+
+// encode encodes the Int32Value into an eventstream binary value
+// representation.
+func (v Int32Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int32Value) decode(r io.Reader) error {
+ n, err := decodeUint32(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int32Value(n)
+ return nil
+}
+
+// An Int64Value provides eventstream encoding, and representation of a Go
+// int64 value.
+type Int64Value int64
+
+// Get returns the underlying value.
+func (v Int64Value) Get() interface{} {
+ return int64(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (Int64Value) valueType() valueType {
+ return int64ValueType
+}
+
+func (v Int64Value) String() string {
+ return fmt.Sprintf("0x%016x", int64(v))
+}
+
+// encode encodes the Int64Value into an eventstream binary value
+// representation.
+func (v Int64Value) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+ return raw.encodeScalar(w, v)
+}
+
+func (v *Int64Value) decode(r io.Reader) error {
+ n, err := decodeUint64(r)
+ if err != nil {
+ return err
+ }
+
+ *v = Int64Value(n)
+ return nil
+}
+
+// A BytesValue provides eventstream encoding and representation of a Go
+// byte slice.
+type BytesValue []byte
+
+// Get returns the underlying value.
+func (v BytesValue) Get() interface{} {
+ return []byte(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (BytesValue) valueType() valueType {
+ return bytesValueType
+}
+
+func (v BytesValue) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+// encode encodes the BytesValue into an eventstream binary value
+// representation.
+func (v BytesValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeBytes(w, []byte(v))
+}
+
+func (v *BytesValue) decode(r io.Reader) error {
+ buf, err := decodeBytesValue(r)
+ if err != nil {
+ return err
+ }
+
+ *v = BytesValue(buf)
+ return nil
+}
+
+// A StringValue provides eventstream encoding and representation of a Go
+// string.
+type StringValue string
+
+// Get returns the underlying value.
+func (v StringValue) Get() interface{} {
+ return string(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (StringValue) valueType() valueType {
+ return stringValueType
+}
+
+func (v StringValue) String() string {
+ return string(v)
+}
+
+// encode encodes the StringValue into an eventstream binary value
+// representation.
+func (v StringValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeString(w, string(v))
+}
+
+func (v *StringValue) decode(r io.Reader) error {
+ s, err := decodeStringValue(r)
+ if err != nil {
+ return err
+ }
+
+ *v = StringValue(s)
+ return nil
+}
+
+// A TimestampValue provides eventstream encoding and representation of a Go
+// timestamp.
+type TimestampValue time.Time
+
+// Get returns the underlying value.
+func (v TimestampValue) Get() interface{} {
+ return time.Time(v)
+}
+
+// valueType returns the EventStream header value type value.
+func (TimestampValue) valueType() valueType {
+ return timestampValueType
+}
+
+func (v TimestampValue) epochMilli() int64 {
+ nano := time.Time(v).UnixNano()
+ msec := nano / int64(time.Millisecond)
+ return msec
+}
+
+func (v TimestampValue) String() string {
+ msec := v.epochMilli()
+ return strconv.FormatInt(msec, 10)
+}
+
+// encode encodes the TimestampValue into an eventstream binary value
+// representation.
+func (v TimestampValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ msec := v.epochMilli()
+ return raw.encodeScalar(w, msec)
+}
+
+func (v *TimestampValue) decode(r io.Reader) error {
+ n, err := decodeUint64(r)
+ if err != nil {
+ return err
+ }
+
+ *v = TimestampValue(timeFromEpochMilli(int64(n)))
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (v TimestampValue) MarshalJSON() ([]byte, error) {
+ return []byte(v.String()), nil
+}
+
+func timeFromEpochMilli(t int64) time.Time {
+ secs := t / 1e3
+ msec := t % 1e3
+ return time.Unix(secs, msec*int64(time.Millisecond)).UTC()
+}
+
+// A UUIDValue provides eventstream encoding and representation of a UUID
+// value.
+type UUIDValue [16]byte
+
+// Get returns the underlying value.
+func (v UUIDValue) Get() interface{} {
+ return v[:]
+}
+
+// valueType returns the EventStream header value type value.
+func (UUIDValue) valueType() valueType {
+ return uuidValueType
+}
+
+func (v UUIDValue) String() string {
+ var scratch [36]byte
+
+ const dash = '-'
+
+ hex.Encode(scratch[:8], v[0:4])
+ scratch[8] = dash
+ hex.Encode(scratch[9:13], v[4:6])
+ scratch[13] = dash
+ hex.Encode(scratch[14:18], v[6:8])
+ scratch[18] = dash
+ hex.Encode(scratch[19:23], v[8:10])
+ scratch[23] = dash
+ hex.Encode(scratch[24:], v[10:])
+
+ return string(scratch[:])
+}
+
+// encode encodes the UUIDValue into an eventstream binary value
+// representation.
+func (v UUIDValue) encode(w io.Writer) error {
+ raw := rawValue{
+ Type: v.valueType(),
+ }
+
+ return raw.encodeFixedSlice(w, v[:])
+}
+
+func (v *UUIDValue) decode(r io.Reader) error {
+ tv := (*v)[:]
+ return decodeFixedBytesValue(r, tv)
+}
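A short illustrative sketch of the value types above: Set replaces an existing header by name, Get returns nil for a missing name, and TimestampValue renders as epoch milliseconds.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
)

func main() {
	var hs eventstream.Headers
	hs.Set(":event-type", eventstream.StringValue("Stats"))
	hs.Set("attempt", eventstream.Int32Value(1))
	hs.Set("attempt", eventstream.Int32Value(2)) // replaces, does not append

	fmt.Println(hs.Get("attempt"))        // 0x00000002
	fmt.Println(hs.Get("missing") == nil) // true

	ts := eventstream.TimestampValue(time.Unix(1700000000, 0))
	fmt.Println(ts) // 1700000000000 (epoch milliseconds)
}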
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go
new file mode 100644
index 000000000..1a77654f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go
@@ -0,0 +1,99 @@
+package eventstream
+
+import (
+ "bytes"
+ "encoding/binary"
+ "hash/crc32"
+)
+
+const preludeLen = 8
+const preludeCRCLen = 4
+const msgCRCLen = 4
+const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
+
+var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
+
+// A Message provides the eventstream message representation.
+type Message struct {
+ Headers Headers
+ Payload []byte
+}
+
+func (m *Message) rawMessage() (rawMessage, error) {
+ var raw rawMessage
+
+ if len(m.Headers) > 0 {
+ var headers bytes.Buffer
+ if err := EncodeHeaders(&headers, m.Headers); err != nil {
+ return rawMessage{}, err
+ }
+ raw.Headers = headers.Bytes()
+ raw.HeadersLen = uint32(len(raw.Headers))
+ }
+
+ raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen
+
+ hash := crc32.New(crc32IEEETable)
+ binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen)
+ raw.PreludeCRC = hash.Sum32()
+
+ binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC)
+
+ if raw.HeadersLen > 0 {
+ hash.Write(raw.Headers)
+ }
+
+ // Read payload bytes and update hash for it as well.
+ if len(m.Payload) > 0 {
+ raw.Payload = m.Payload
+ hash.Write(raw.Payload)
+ }
+
+ raw.CRC = hash.Sum32()
+
+ return raw, nil
+}
+
+// Clone returns a deep copy of the message.
+func (m Message) Clone() Message {
+ var payload []byte
+ if m.Payload != nil {
+ payload = make([]byte, len(m.Payload))
+ copy(payload, m.Payload)
+ }
+
+ return Message{
+ Headers: m.Headers.Clone(),
+ Payload: payload,
+ }
+}
+
+type messagePrelude struct {
+ Length uint32
+ HeadersLen uint32
+ PreludeCRC uint32
+}
+
+func (p messagePrelude) PayloadLen() uint32 {
+ return p.Length - p.HeadersLen - minMsgLen
+}
+
+func (p messagePrelude) ValidateLens() error {
+	// Anything shorter than the fixed framing cannot be a valid message and
+	// would cause PayloadLen to underflow.
+	if p.Length < minMsgLen {
+ return LengthError{
+ Part: "message prelude",
+ Want: minMsgLen,
+ Have: int(p.Length),
+ }
+ }
+ return nil
+}
+
+type rawMessage struct {
+ messagePrelude
+
+ Headers []byte
+ Payload []byte
+
+ CRC uint32
+}
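Clone performs a deep copy of the payload, so mutating the original message afterwards does not leak into the copy (illustrative sketch):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
)

func main() {
	orig := eventstream.Message{Payload: []byte("abc")}
	clone := orig.Clone()

	orig.Payload[0] = 'z' // mutate the original in place
	fmt.Println(string(orig.Payload), string(clone.Payload)) // zbc abc
}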
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
new file mode 100644
index 000000000..6669a3ddf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -0,0 +1,61 @@
+package query
+
+import (
+ "net/url"
+ "strconv"
+)
+
+// Array represents the encoding of Query lists and sets. A Query array is a
+// representation of a list of values of a fixed type. A serialized array might
+// look like the following:
+//
+// ListName.member.1=foo
+// &ListName.member.2=bar
+//	&ListName.member.3=baz
+type Array struct {
+ // The query values to add the array to.
+ values url.Values
+ // The array's prefix, which includes the names of all parent structures
+ // and ends with the name of the list. For example, the prefix might be
+ // "ParentStructure.ListName". This prefix will be used to form the full
+ // keys for each element in the list. For example, an entry might have the
+ // key "ParentStructure.ListName.member.MemberName.1".
+ //
+	// When the array is not flat, the prefix will contain the memberName; otherwise the memberName is ignored.
+ prefix string
+ // Elements are stored in values, so we keep track of the list size here.
+ size int32
+	// Empty lists are encoded as "="; if we add a value later we will
+	// remove this encoding.
+ emptyValue Value
+}
+
+func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+ emptyValue := newValue(values, prefix, flat)
+ emptyValue.String("")
+
+ if !flat {
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ prefix = prefix + keySeparator + memberName
+ }
+
+ return &Array{
+ values: values,
+ prefix: prefix,
+ emptyValue: emptyValue,
+ }
+}
+
+// Value adds a new element to the Query Array. Returns a Value type used to
+// encode the array element.
+func (a *Array) Value() Value {
+ if a.size == 0 {
+ delete(a.values, a.emptyValue.key)
+ }
+
+	// Query lists start at 1, so adjust the size first
+ a.size++
+ // Lists can't have flat members
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ return newValue(a.values, a.prefix+keySeparator+strconv.FormatInt(int64(a.size), 10), false)
+}
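An illustrative sketch that produces exactly the serialized form shown in the Array doc comment, driven through the Encoder defined in the next file. It assumes the smithy-go httpbinding QueryValue used by this package sets values into the shared url.Values.

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var buf bytes.Buffer
	enc := query.NewEncoder(&buf)

	list := enc.Object().Key("ListName").Array("member")
	list.Value().String("foo")
	list.Value().String("bar")
	list.Value().String("baz")

	if err := enc.Encode(); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// ListName.member.1=foo&ListName.member.2=bar&ListName.member.3=baz
}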
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
new file mode 100644
index 000000000..2ecf9241c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
@@ -0,0 +1,80 @@
+package query
+
+import (
+ "io"
+ "net/url"
+ "sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+ // The query values that will be built up to manage encoding.
+ values url.Values
+ // The writer that the encoded body will be written to.
+ writer io.Writer
+ Value
+}
+
+// NewEncoder returns a new Query body encoder
+func NewEncoder(writer io.Writer) *Encoder {
+ values := url.Values{}
+ return &Encoder{
+ values: values,
+ writer: writer,
+ Value: newBaseValue(values),
+ }
+}
+
+// Encode writes the accumulated query values to the Encoder's writer as a
+// sorted, URL-encoded query string. An error is returned if writing fails.
+func (e Encoder) Encode() error {
+ ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+ if !ok {
+ // Fall back to less optimal byte slice casting if WriteString isn't available.
+ ws = &wrapWriteString{writer: e.writer}
+ }
+
+ // Get the keys and sort them to have a stable output
+ keys := make([]string, 0, len(e.values))
+ for k := range e.values {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ isFirstEntry := true
+ for _, key := range keys {
+ queryValues := e.values[key]
+ escapedKey := url.QueryEscape(key)
+ for _, value := range queryValues {
+ if !isFirstEntry {
+ if _, err := ws.WriteString(`&`); err != nil {
+ return err
+ }
+ } else {
+ isFirstEntry = false
+ }
+ if _, err := ws.WriteString(escapedKey); err != nil {
+ return err
+ }
+ if _, err := ws.WriteString(`=`); err != nil {
+ return err
+ }
+ if _, err := ws.WriteString(url.QueryEscape(value)); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// wrapWriteString wraps an io.Writer to provide a WriteString method
+// where one is not available.
+type wrapWriteString struct {
+ writer io.Writer
+}
+
+// WriteString writes a string to the wrapped writer by converting it to a
+// byte slice first.
+func (w wrapWriteString) WriteString(v string) (int, error) {
+ return w.writer.Write([]byte(v))
+}
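Because keys are sorted before writing, the encoder's output is deterministic regardless of insertion order, and both keys and values are percent-encoded. An illustrative sketch (not part of the vendored source):

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var buf bytes.Buffer
	enc := query.NewEncoder(&buf)

	obj := enc.Object()
	obj.Key("Version").String("2012-06-01")
	obj.Key("Action").String("DescribeWidgets")
	obj.Key("Filter").String("a b") // value gets escaped

	if err := enc.Encode(); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// Action=DescribeWidgets&Filter=a+b&Version=2012-06-01
}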
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
new file mode 100644
index 000000000..dea242b8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
@@ -0,0 +1,78 @@
+package query
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// Map represents the encoding of Query maps. A Query map is a representation
+// of a mapping of arbitrary string keys to arbitrary values of a fixed type.
+// A Map differs from an Object in that the set of keys is not fixed, in that
+// the values must all be of the same type, and that map entries are ordered.
+// A serialized map might look like the following:
+//
+// MapName.entry.1.key=Foo
+// &MapName.entry.1.value=spam
+// &MapName.entry.2.key=Bar
+// &MapName.entry.2.value=eggs
+type Map struct {
+ // The query values to add the map to.
+ values url.Values
+ // The map's prefix, which includes the names of all parent structures
+ // and ends with the name of the object. For example, the prefix might be
+ // "ParentStructure.MapName". This prefix will be used to form the full
+ // keys for each key-value pair of the map. For example, a value might have
+ // the key "ParentStructure.MapName.1.value".
+ //
+ // While this is currently represented as a string that gets added to, it
+ // could also be represented as a stack that only gets condensed into a
+ // string when a finalized key is created. This could potentially reduce
+ // allocations.
+ prefix string
+ // Whether the map is flat or not. A map that is not flat will produce the
+ // following entries to the url.Values for a given key-value pair:
+ // MapName.entry.1.KeyLocationName=mykey
+ // MapName.entry.1.ValueLocationName=myvalue
+ // A map that is flat will produce the following:
+ // MapName.1.KeyLocationName=mykey
+ // MapName.1.ValueLocationName=myvalue
+ flat bool
+ // The location name of the key. In most cases this should be "key".
+ keyLocationName string
+ // The location name of the value. In most cases this should be "value".
+ valueLocationName string
+ // Elements are stored in values, so we keep track of the list size here.
+ size int32
+}
+
+func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
+ return &Map{
+ values: values,
+ prefix: prefix,
+ flat: flat,
+ keyLocationName: keyLocationName,
+ valueLocationName: valueLocationName,
+ }
+}
+
+// Key adds the given named key to the Query map.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (m *Map) Key(name string) Value {
+	// Query map entries start at 1, so adjust the size first
+ m.size++
+ var key string
+ var value string
+ if m.flat {
+ key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
+ value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
+ } else {
+ key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
+ value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
+ }
+
+ // The key can only be a string, so we just go ahead and set it here
+ newValue(m.values, key, false).String(name)
+
+ // Maps can't have flat members
+ return newValue(m.values, value, false)
+}
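An illustrative sketch producing the non-flat form from the Map doc comment, again via the Encoder from this package:

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/query"
)

func main() {
	var buf bytes.Buffer
	enc := query.NewEncoder(&buf)

	m := enc.Object().Key("MapName").Map("key", "value")
	m.Key("Foo").String("spam")
	m.Key("Bar").String("eggs")

	if err := enc.Encode(); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// MapName.entry.1.key=Foo&MapName.entry.1.value=spam&
	// MapName.entry.2.key=Bar&MapName.entry.2.value=eggs
}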
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
new file mode 100644
index 000000000..360344791
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
@@ -0,0 +1,62 @@
+package query
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
+// operation serializer that will convert the query request body to a GET
+// operation with the query message in the HTTP request querystring.
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error {
+ return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After)
+}
+
+type asGetRequest struct{}
+
+func (*asGetRequest) ID() string { return "Query:AsGetRequest" }
+
+func (m *asGetRequest) HandleSerialize(
+ ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request)
+ }
+
+ req.Method = "GET"
+
+ // If the stream is not set, nothing else to do.
+ stream := req.GetStream()
+ if stream == nil {
+ return next.HandleSerialize(ctx, input)
+ }
+
+ // Clear the stream since there will not be any body.
+ req.Header.Del("Content-Type")
+ req, err = req.SetStream(nil)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable update request body %w", err)
+ }
+ input.Request = req
+
+ // Update request query with the body's query string value.
+ delim := ""
+ if len(req.URL.RawQuery) != 0 {
+ delim = "&"
+ }
+
+ b, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to get request body %w", err)
+ }
+ req.URL.RawQuery += delim + string(b)
+
+ return next.HandleSerialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
new file mode 100644
index 000000000..305a8ace3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
@@ -0,0 +1,68 @@
+package query
+
+import "net/url"
+
+// Object represents the encoding of Query structures and unions. A Query
+// object is a representation of a mapping of string keys to arbitrary
+// values where there is a fixed set of keys whose values each have their
+// own known type. A serialized object might look like the following:
+//
+// ObjectName.Foo=value
+// &ObjectName.Bar=5
+type Object struct {
+ // The query values to add the object to.
+ values url.Values
+ // The object's prefix, which includes the names of all parent structures
+ // and ends with the name of the object. For example, the prefix might be
+ // "ParentStructure.ObjectName". This prefix will be used to form the full
+ // keys for each member of the object. For example, a member might have the
+ // key "ParentStructure.ObjectName.MemberName".
+ //
+ // While this is currently represented as a string that gets added to, it
+ // could also be represented as a stack that only gets condensed into a
+ // string when a finalized key is created. This could potentially reduce
+ // allocations.
+ prefix string
+}
+
+func newObject(values url.Values, prefix string) *Object {
+ return &Object{
+ values: values,
+ prefix: prefix,
+ }
+}
+
+// Key adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (o *Object) Key(name string) Value {
+ return o.key(name, false)
+}
+
+// KeyWithValues adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query list of values.
+func (o *Object) KeyWithValues(name string) Value {
+ return o.keyWithValues(name, false)
+}
+
+// FlatKey adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query value type. The
+// value will be flattened if it is a map or array.
+func (o *Object) FlatKey(name string) Value {
+ return o.key(name, true)
+}
+
+func (o *Object) key(name string, flatValue bool) Value {
+ if o.prefix != "" {
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ return newValue(o.values, o.prefix+keySeparator+name, flatValue)
+ }
+ return newValue(o.values, name, flatValue)
+}
+
+func (o *Object) keyWithValues(name string, flatValue bool) Value {
+ if o.prefix != "" {
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ return newAppendValue(o.values, o.prefix+keySeparator+name, flatValue)
+ }
+ return newAppendValue(o.values, name, flatValue)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
new file mode 100644
index 000000000..8063c592d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
@@ -0,0 +1,117 @@
+package query
+
+import (
+ "math/big"
+ "net/url"
+
+ "github.com/aws/smithy-go/encoding/httpbinding"
+)
+
+const keySeparator = "."
+
+// Value represents a Query Value type.
+type Value struct {
+ // The query values to add the value to.
+ values url.Values
+ // The value's key, which will form the prefix for complex types.
+ key string
+ // Whether the value should be flattened or not if it's a flattenable type.
+ flat bool
+ queryValue httpbinding.QueryValue
+}
+
+func newValue(values url.Values, key string, flat bool) Value {
+ return Value{
+ values: values,
+ key: key,
+ flat: flat,
+ queryValue: httpbinding.NewQueryValue(values, key, false),
+ }
+}
+
+func newAppendValue(values url.Values, key string, flat bool) Value {
+ return Value{
+ values: values,
+ key: key,
+ flat: flat,
+ queryValue: httpbinding.NewQueryValue(values, key, true),
+ }
+}
+
+func newBaseValue(values url.Values) Value {
+ return Value{
+ values: values,
+ queryValue: httpbinding.NewQueryValue(nil, "", false),
+ }
+}
+
+// Array returns a new Array encoder.
+func (qv Value) Array(locationName string) *Array {
+ return newArray(qv.values, qv.key, qv.flat, locationName)
+}
+
+// Object returns a new Object encoder.
+func (qv Value) Object() *Object {
+ return newObject(qv.values, qv.key)
+}
+
+// Map returns a new Map encoder.
+func (qv Value) Map(keyLocationName string, valueLocationName string) *Map {
+ return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName)
+}
+
+// Base64EncodeBytes encodes v as a base64 query string value.
+// This is intended to enable compatibility with the JSON encoder.
+func (qv Value) Base64EncodeBytes(v []byte) {
+ qv.queryValue.Blob(v)
+}
+
+// Boolean encodes v as a query string value
+func (qv Value) Boolean(v bool) {
+ qv.queryValue.Boolean(v)
+}
+
+// String encodes v as a query string value
+func (qv Value) String(v string) {
+ qv.queryValue.String(v)
+}
+
+// Byte encodes v as a query string value
+func (qv Value) Byte(v int8) {
+ qv.queryValue.Byte(v)
+}
+
+// Short encodes v as a query string value
+func (qv Value) Short(v int16) {
+ qv.queryValue.Short(v)
+}
+
+// Integer encodes v as a query string value
+func (qv Value) Integer(v int32) {
+ qv.queryValue.Integer(v)
+}
+
+// Long encodes v as a query string value
+func (qv Value) Long(v int64) {
+ qv.queryValue.Long(v)
+}
+
+// Float encodes v as a query string value
+func (qv Value) Float(v float32) {
+ qv.queryValue.Float(v)
+}
+
+// Double encodes v as a query string value
+func (qv Value) Double(v float64) {
+ qv.queryValue.Double(v)
+}
+
+// BigInteger encodes v as a query string value
+func (qv Value) BigInteger(v *big.Int) {
+ qv.queryValue.BigInteger(v)
+}
+
+// BigDecimal encodes v as a query string value
+func (qv Value) BigDecimal(v *big.Float) {
+ qv.queryValue.BigDecimal(v)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
new file mode 100644
index 000000000..1bce78a4d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
@@ -0,0 +1,85 @@
+package restjson
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+
+ "github.com/aws/smithy-go"
+)
+
+// GetErrorInfo looks for code, __type, and message members in the
+// json body. These members are optionally available, and the function
+// returns the value of each member that is present. This function is useful
+// for identifying the error code and message in a REST JSON error response.
+func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) {
+ var errInfo struct {
+ Code string
+ Type string `json:"__type"`
+ Message string
+ }
+
+ err = decoder.Decode(&errInfo)
+ if err != nil {
+ if err == io.EOF {
+ return errorType, message, nil
+ }
+ return errorType, message, err
+ }
+
+ // assign error type
+ if len(errInfo.Code) != 0 {
+ errorType = errInfo.Code
+ } else if len(errInfo.Type) != 0 {
+ errorType = errInfo.Type
+ }
+
+ // assign error message
+ if len(errInfo.Message) != 0 {
+ message = errInfo.Message
+ }
+
+ // sanitize error
+ if len(errorType) != 0 {
+ errorType = SanitizeErrorCode(errorType)
+ }
+
+ return errorType, message, nil
+}
+
+// SanitizeErrorCode sanitizes the errorCode string.
+// The rule for sanitizing is if a `:` character is present, then take only the
+// contents before the first : character in the value.
+// If a # character is present, then take only the contents after the
+// first # character in the value.
+func SanitizeErrorCode(errorCode string) string {
+ if strings.ContainsAny(errorCode, ":") {
+ errorCode = strings.SplitN(errorCode, ":", 2)[0]
+ }
+
+ if strings.ContainsAny(errorCode, "#") {
+ errorCode = strings.SplitN(errorCode, "#", 2)[1]
+ }
+
+ return errorCode
+}
+
+// GetSmithyGenericAPIError returns a smithy GenericAPIError and an error interface.
+// It takes a json decoder and an error code string as arguments. The function retrieves
+// the error message and error code from the decoder body. If a non-empty errorCode is
+// passed in as an argument, it is used instead of the decoded one.
+func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) {
+ errorType, message, err := GetErrorInfo(decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(errorCode) == 0 {
+ errorCode = errorType
+ }
+
+ return &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: message,
+ }, nil
+}
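
For orientation, a minimal sketch of the two sanitization rules composing (the ':' rule runs before the '#' rule); the input strings are hypothetical wire values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
)

func main() {
	// ':' rule: keep only the contents before the first ':'.
	fmt.Println(restjson.SanitizeErrorCode("ValidationException:details")) // ValidationException
	// '#' rule: keep only the contents after the first '#'.
	fmt.Println(restjson.SanitizeErrorCode("aws.svc#ValidationException")) // ValidationException
	// Both rules applied in sequence.
	fmt.Println(restjson.SanitizeErrorCode("aws.svc#ValidationException:v1")) // ValidationException
}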
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
new file mode 100644
index 000000000..6975ce652
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
@@ -0,0 +1,48 @@
+package xml
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+ Code string
+ Message string
+ RequestID string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+ if noErrorWrapping {
+ var errResponse noWrappedErrorResponse
+ if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+ }
+ return ErrorComponents(errResponse), nil
+ }
+
+ var errResponse wrappedErrorResponse
+ if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+ }
+ return ErrorComponents(errResponse), nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal Error wrapping
+type noWrappedErrorResponse struct {
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within Error
+type wrappedErrorResponse struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
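
A usage sketch for the wrapped shape; the XML payload is a hypothetical service error body:

package main

import (
	"fmt"
	"strings"

	awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml"
)

func main() {
	body := `<ErrorResponse>` +
		`<Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>` +
		`<RequestId>abc-123</RequestId>` +
		`</ErrorResponse>`

	// noErrorWrapping=false selects the Error>Code / Error>Message layout.
	ec, err := awsxml.GetErrorResponseComponents(strings.NewReader(body), false)
	if err != nil {
		panic(err)
	}
	fmt.Println(ec.Code, ec.Message, ec.RequestID) // Throttling Rate exceeded abc-123
}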
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
new file mode 100644
index 000000000..8c7836410
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
@@ -0,0 +1,20 @@
+package ratelimit
+
+import "context"
+
+// None implements a no-op rate limiter which effectively disables client-side
+// rate limiting (also known as "retry quotas").
+//
+// GetToken does nothing and always returns a nil error. The returned
+// token-release function does nothing, and always returns a nil error.
+//
+// AddTokens does nothing and always returns a nil error.
+var None = &none{}
+
+type none struct{}
+
+func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) {
+ return func() error { return nil }, nil
+}
+
+func (*none) AddTokens(v uint) error { return nil }
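
A sketch of plugging None into the standard retryer; this assumes the StandardOptions.RateLimiter field described at the end of this change:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// With the no-op limiter, retries are bounded only by max attempts and
	// backoff delay, never by retry quota tokens.
	_ = retry.NewStandard(func(o *retry.StandardOptions) {
		o.RateLimiter = ratelimit.None
	})
}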
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
new file mode 100644
index 000000000..974ef594f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
@@ -0,0 +1,96 @@
+package ratelimit
+
+import (
+ "sync"
+)
+
+// TokenBucket provides a concurrency safe utility for adding and removing
+// tokens from the available token bucket.
+type TokenBucket struct {
+ remainingTokens uint
+ maxCapacity uint
+ minCapacity uint
+ mu sync.Mutex
+}
+
+// NewTokenBucket returns an initialized TokenBucket with the capacity
+// specified.
+func NewTokenBucket(i uint) *TokenBucket {
+ return &TokenBucket{
+ remainingTokens: i,
+ maxCapacity: i,
+ minCapacity: 1,
+ }
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if amount > t.remainingTokens {
+ return t.remainingTokens, false
+ }
+
+ t.remainingTokens -= amount
+ return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ // Capacity cannot exceed max capacity.
+ t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *TokenBucket) Remaining() uint {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.maxCapacity = uintMax(size, t.minCapacity)
+
+ // Capacity needs to be capped at max capacity, if max size reduced.
+ t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+ return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func uintMax(a, b uint) uint {
+ if a > b {
+ return a
+ }
+ return b
+}
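
A short sketch of the bucket's clamping behavior:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
)

func main() {
	bucket := ratelimit.NewTokenBucket(10)

	remaining, ok := bucket.Retrieve(3)
	fmt.Println(remaining, ok) // 7 true

	bucket.Refund(100)              // refunds clamp at max capacity
	fmt.Println(bucket.Remaining()) // 10

	fmt.Println(bucket.Resize(5)) // shrinking also caps remaining tokens: 5
}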
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 000000000..d89090ad3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,83 @@
+package ratelimit
+
+import (
+ "context"
+ "fmt"
+)
+
+type rateToken struct {
+ tokenCost uint
+ bucket *TokenBucket
+}
+
+func (t rateToken) release() error {
+ t.bucket.Refund(t.tokenCost)
+ return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+ bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit with default values.
+// Functional options can configure the retry rate limiter.
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+ return &TokenRateLimit{
+ bucket: NewTokenBucket(tokens),
+ }
+}
+
+type canceledError struct {
+ Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error { return c.Err }
+func (c canceledError) Error() string {
+ return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Will return an error if the requested cost cannot be
+// deducted from the retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+ select {
+ case <-ctx.Done():
+ return nil, canceledError{Err: ctx.Err()}
+ default:
+ }
+ if avail, ok := l.bucket.Retrieve(cost); !ok {
+ return nil, QuotaExceededError{Available: avail, Requested: cost}
+ }
+
+ return rateToken{
+ tokenCost: cost,
+ bucket: l.bucket,
+ }.release, nil
+}
+
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+ l.bucket.Refund(v)
+ return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+ return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+ Available uint
+ Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+ return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+ e.Available, e.Requested)
+}
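
A sketch of exhausting the quota and refunding it via the release function:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
)

func main() {
	limiter := ratelimit.NewTokenRateLimit(5)

	release, err := limiter.GetToken(context.Background(), 5)
	if err != nil {
		panic(err)
	}

	// The pool is now empty; the next acquisition fails immediately with a
	// QuotaExceededError rather than blocking.
	_, err = limiter.GetToken(context.Background(), 1)
	var quotaErr ratelimit.QuotaExceededError
	fmt.Println(errors.As(err, &quotaErr), quotaErr.Available) // true 0

	_ = release()                    // refunds the original 5 tokens
	fmt.Println(limiter.Remaining()) // 5
}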
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 000000000..d8d00e615
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+ "fmt"
+)
+
+// TODO remove replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+ Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestCanceledError) Unwrap() error {
+ return e.Err
+}
+func (e *RequestCanceledError) Error() string {
+ return fmt.Sprintf("request canceled, %v", e.Err)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
new file mode 100644
index 000000000..4dfde8573
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
@@ -0,0 +1,156 @@
+package retry
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+const (
+ // DefaultRequestCost is the cost of a single request from the adaptive
+ // rate limited token bucket.
+ DefaultRequestCost uint = 1
+)
+
+// DefaultThrottles provides the set of errors considered throttle errors that
+// are checked by default.
+var DefaultThrottles = []IsErrorThrottle{
+ ThrottleErrorCode{
+ Codes: DefaultThrottleErrorCodes,
+ },
+}
+
+// AdaptiveModeOptions provides the functional options for configuring the
+// adaptive retry mode, and delay behavior.
+type AdaptiveModeOptions struct {
+	// If the adaptive token bucket is empty when an attempt is to be made,
+	// AdaptiveMode will sleep until a token is available. This can occur when
+	// attempts fail with throttle errors. Use this option to disable the sleep
+	// until a token is available, and return an error immediately.
+ FailOnNoAttemptTokens bool
+
+ // The cost of an attempt from the AdaptiveMode's adaptive token bucket.
+ RequestCost uint
+
+ // Set of strategies to determine if the attempt failed due to a throttle
+ // error.
+ //
+ // It is safe to append to this list in NewAdaptiveMode's functional options.
+ Throttles []IsErrorThrottle
+
+ // Set of options for standard retry mode that AdaptiveMode is built on top
+ // of. AdaptiveMode may apply its own defaults to Standard retry mode that
+ // are different than the defaults of NewStandard. Use these options to
+ // override the default options.
+ StandardOptions []func(*StandardOptions)
+}
+
+// AdaptiveMode provides an experimental retry strategy that expands on the
+// Standard retry strategy, adding client attempt rate limits. The attempt rate
+// limit is initially unrestricted, but becomes restricted when the attempt
+// fails with a throttle error. When restricted, AdaptiveMode may need to
+// sleep before an attempt is made, if too many throttles have been received.
+// AdaptiveMode's sleep can be canceled with context cancel. Set
+// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from
+// sleeping to failing fast.
+//
+// Eventually the unrestricted attempt rate limit will be restored once
+// attempts no longer fail due to throttle errors.
+type AdaptiveMode struct {
+ options AdaptiveModeOptions
+ throttles IsErrorThrottles
+
+ retryer aws.RetryerV2
+ rateLimit *adaptiveRateLimit
+}
+
+// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy.
+func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode {
+ o := AdaptiveModeOptions{
+ RequestCost: DefaultRequestCost,
+ Throttles: append([]IsErrorThrottle{}, DefaultThrottles...),
+ }
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &AdaptiveMode{
+ options: o,
+ throttles: IsErrorThrottles(o.Throttles),
+ retryer: NewStandard(o.StandardOptions...),
+ rateLimit: newAdaptiveRateLimit(),
+ }
+}
+
+// IsErrorRetryable returns if the failed attempt is retryable. This check
+// should determine if the error can be retried, or if the error is
+// terminal.
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+ return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// an operation before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+ return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return an error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+ time.Duration, error,
+) {
+ return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// Returns the token release function, or an error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+ releaseToken func(error) error, err error,
+) {
+ return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
+// present to implement Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+ return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt, and to
+// release the attempt token after the attempt has been made.
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
+ for {
+ acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
+ if acquiredToken {
+ break
+ }
+ if a.options.FailOnNoAttemptTokens {
+ return nil, fmt.Errorf(
+				"unable to get attempt token, and FailOnNoAttemptTokens is enabled")
+ }
+
+ if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
+ return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
+ }
+ }
+
+ return a.handleResponse, nil
+}
+
+func (a *AdaptiveMode) handleResponse(opErr error) error {
+ throttled := a.throttles.IsErrorThrottle(opErr).Bool()
+
+ a.rateLimit.Update(throttled)
+ return nil
+}
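
A configuration sketch combining the adaptive options with the wrapped standard retryer's options:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	// Fail fast instead of sleeping when the adaptive bucket is empty, and
	// raise the attempt ceiling of the underlying standard retryer.
	_ = retry.NewAdaptiveMode(func(o *retry.AdaptiveModeOptions) {
		o.FailOnNoAttemptTokens = true
		o.StandardOptions = append(o.StandardOptions, func(so *retry.StandardOptions) {
			so.MaxAttempts = 5
		})
	})
}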
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
new file mode 100644
index 000000000..ad96d9b8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
@@ -0,0 +1,158 @@
+package retry
+
+import (
+ "math"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+type adaptiveRateLimit struct {
+ tokenBucketEnabled bool
+
+ smooth float64
+ beta float64
+ scaleConstant float64
+ minFillRate float64
+
+ fillRate float64
+ calculatedRate float64
+ lastRefilled time.Time
+ measuredTxRate float64
+ lastTxRateBucket float64
+ requestCount int64
+ lastMaxRate float64
+ lastThrottleTime time.Time
+ timeWindow float64
+
+ tokenBucket *adaptiveTokenBucket
+
+ mu sync.Mutex
+}
+
+func newAdaptiveRateLimit() *adaptiveRateLimit {
+ now := sdk.NowTime()
+ return &adaptiveRateLimit{
+ smooth: 0.8,
+ beta: 0.7,
+ scaleConstant: 0.4,
+
+ minFillRate: 0.5,
+
+ lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
+ lastThrottleTime: now,
+
+ tokenBucket: newAdaptiveTokenBucket(0),
+ }
+}
+
+func (a *adaptiveRateLimit) Enable(v bool) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.tokenBucketEnabled = v
+}
+
+func (a *adaptiveRateLimit) AcquireToken(amount uint) (
+ tokenAcquired bool, waitTryAgain time.Duration,
+) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ if !a.tokenBucketEnabled {
+ return true, 0
+ }
+
+ a.tokenBucketRefill()
+
+ available, ok := a.tokenBucket.Retrieve(float64(amount))
+ if !ok {
+ waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
+ return false, waitDur
+ }
+
+ return true, 0
+}
+
+func (a *adaptiveRateLimit) Update(throttled bool) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ a.updateMeasuredRate()
+
+ if throttled {
+ rateToUse := a.measuredTxRate
+ if a.tokenBucketEnabled {
+ rateToUse = math.Min(a.measuredTxRate, a.fillRate)
+ }
+
+ a.lastMaxRate = rateToUse
+ a.calculateTimeWindow()
+ a.lastThrottleTime = sdk.NowTime()
+ a.calculatedRate = a.cubicThrottle(rateToUse)
+ a.tokenBucketEnabled = true
+ } else {
+ a.calculateTimeWindow()
+ a.calculatedRate = a.cubicSuccess(sdk.NowTime())
+ }
+
+ newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate)
+ a.tokenBucketUpdateRate(newRate)
+}
+
+func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 {
+ dt := secondsFloat64(t.Sub(a.lastThrottleTime))
+ return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate
+}
+
+func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 {
+ return rateToUse * a.beta
+}
+
+func (a *adaptiveRateLimit) calculateTimeWindow() {
+ a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.)
+}
+
+func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) {
+ a.tokenBucketRefill()
+ a.fillRate = math.Max(newRPS, a.minFillRate)
+ a.tokenBucket.Resize(newRPS)
+}
+
+func (a *adaptiveRateLimit) updateMeasuredRate() {
+ now := sdk.NowTime()
+ timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2.
+ a.requestCount++
+
+ if timeBucket > a.lastTxRateBucket {
+ currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket)
+ a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth))
+ a.requestCount = 0
+ a.lastTxRateBucket = timeBucket
+ }
+}
+
+func (a *adaptiveRateLimit) tokenBucketRefill() {
+ now := sdk.NowTime()
+ if a.lastRefilled.IsZero() {
+ a.lastRefilled = now
+ return
+ }
+
+ fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate
+ a.tokenBucket.Refund(fillAmount)
+ a.lastRefilled = now
+}
+
+func float64Seconds(v float64) time.Duration {
+ return time.Duration(v * float64(time.Second))
+}
+
+func secondsFloat64(v time.Duration) float64 {
+ return float64(v) / float64(time.Second)
+}
+
+func timeFloat64Seconds(v time.Time) float64 {
+ return float64(v.UnixNano()) / float64(time.Second)
+}
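
The cubic recovery is easier to see with numbers. This standalone sketch mirrors the formulas above with the hard-coded coefficients (beta=0.7, scaleConstant=0.4), assuming a client throttled at a measured 10 req/s:

package main

import (
	"fmt"
	"math"
)

func main() {
	const beta, scaleC = 0.7, 0.4
	lastMaxRate := 10.0

	// cubicThrottle: on throttle, the rate is cut to beta*rate = 7 req/s.
	fmt.Println(lastMaxRate * beta)

	// calculateTimeWindow: where the cubic recrosses the old maximum.
	w := math.Pow(lastMaxRate*(1.-beta)/scaleC, 1./3.) // ~1.96s

	// cubicSuccess: the allowed rate grows back along the cubic curve.
	for _, dt := range []float64{0, w, w + 1} {
		rate := scaleC*math.Pow(dt-w, 3) + lastMaxRate
		fmt.Printf("t=%.2fs rate=%.2f req/s\n", dt, rate) // 7.00, 10.00, 10.40
	}
}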
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go
new file mode 100644
index 000000000..052723e8e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go
@@ -0,0 +1,83 @@
+package retry
+
+import (
+ "math"
+ "sync"
+)
+
+// adaptiveTokenBucket provides a concurrency safe utility for adding and
+// removing tokens from the available token bucket.
+type adaptiveTokenBucket struct {
+ remainingTokens float64
+ maxCapacity float64
+ minCapacity float64
+ mu sync.Mutex
+}
+
+// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the
+// capacity specified.
+func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket {
+ return &adaptiveTokenBucket{
+ remainingTokens: i,
+ maxCapacity: i,
+ minCapacity: 1,
+ }
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if amount > t.remainingTokens {
+ return t.remainingTokens, false
+ }
+
+ t.remainingTokens -= amount
+ return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *adaptiveTokenBucket) Refund(amount float64) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ // Capacity cannot exceed max capacity.
+ t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *adaptiveTokenBucket) Capacity() float64 {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *adaptiveTokenBucket) Remaining() float64 {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.maxCapacity = math.Max(size, t.minCapacity)
+
+ // Capacity needs to be capped at max capacity, if max size reduced.
+ t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+ return t.remainingTokens
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go
new file mode 100644
index 000000000..bfa5bf7d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go
@@ -0,0 +1,51 @@
+package retry
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type attemptMetrics struct {
+ Attempts metrics.Int64Counter
+ Errors metrics.Int64Counter
+
+ AttemptDuration metrics.Float64Histogram
+}
+
+func newAttemptMetrics(meter metrics.Meter) (*attemptMetrics, error) {
+ m := &attemptMetrics{}
+ var err error
+
+ m.Attempts, err = meter.Int64Counter("client.call.attempts", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "{attempt}"
+ o.Description = "The number of attempts for an individual operation"
+ })
+ if err != nil {
+ return nil, err
+ }
+ m.Errors, err = meter.Int64Counter("client.call.errors", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "{error}"
+ o.Description = "The number of errors for an operation"
+ })
+ if err != nil {
+ return nil, err
+ }
+ m.AttemptDuration, err = meter.Float64Histogram("client.call.attempt_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes to connect to the service, send the request, and get back HTTP status code and headers (including time queued waiting to be sent)"
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 000000000..3a08ebe0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// # Retryer Interface and Implementations
+//
+// This package defines the Retryer interface, which is used either to implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// # Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a request's subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+// - Retrying errors that implement the RetryableError method, and return true.
+// - Connection Errors
+// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
+// - Connection Reset Errors.
+// - net.OpErr types that are dialing errors or are temporary.
+// - HTTP Status Codes: 500, 502, 503, and 504.
+// - API Error Codes
+// - RequestTimeout, RequestTimeoutException
+// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+// RequestThrottled, SlowDown, EC2ThrottledException
+// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+// - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request in the event that the context associated with the request
+// has been canceled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example to modify the default retry attempts for the standard retryer:
+//
+// // configure the custom retryer
+// customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+// o.MaxAttempts = 5
+// })
+//
+// // create a service client with the retryer
+// s3.NewFromConfig(cfg, func(o *s3.Options) {
+// o.Retryer = customRetry
+// })
+//
+// # Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
+// way. These are:
+//
+// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
+// in addition to those considered retryable by the provided retryer.
+//
+// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
+// a retryer implementation.
+//
+// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
+// request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+// you can use this method to easily create custom back off policies to be used with the
+// standard retryer.
+//
+// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be retried.
+//
+// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
+// this can be used to extend the standard retryer to add additional logic to determine if an
+// error should be considered a timeout.
+package retry
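
A sketch of the first utility; "MyTransientFailure" stands in for any service-specific code added on top of the defaults:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	var r aws.Retryer = retry.NewStandard()

	// The added codes are consulted before the wrapped retryer's own
	// IsErrorRetryable checks.
	r = retry.AddWithErrorCodes(r, "MyTransientFailure")
	_ = r
}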
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
new file mode 100644
index 000000000..3e432eefe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
+// MaxAttemptsError provides the error when the maximum number of attempts have
+// been exceeded.
+type MaxAttemptsError struct {
+ Attempt int
+ Err error
+}
+
+func (e *MaxAttemptsError) Error() string {
+ return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err)
+}
+
+// Unwrap returns the nested error causing the max attempts error. Provides the
+// implementation for errors.Is and errors.As to unwrap nested errors.
+func (e *MaxAttemptsError) Unwrap() error {
+ return e.Err
+}
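
Since the error supports unwrapping, callers can detect exhausted retries with errors.As; a minimal sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	err := fmt.Errorf("operation failed: %w",
		&retry.MaxAttemptsError{Attempt: 3, Err: errors.New("throttled")})

	var maxErr *retry.MaxAttemptsError
	if errors.As(err, &maxErr) {
		fmt.Println("gave up after", maxErr.Attempt, "attempts:", maxErr.Err)
	}
}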
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
new file mode 100644
index 000000000..c266996de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
@@ -0,0 +1,49 @@
+package retry
+
+import (
+ "math"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/timeconv"
+)
+
+// ExponentialJitterBackoff provides backoff delays with jitter based on the
+// number of attempts.
+type ExponentialJitterBackoff struct {
+ maxBackoff time.Duration
+ // precomputed number of attempts needed to reach max backoff.
+ maxBackoffAttempts float64
+
+ randFloat64 func() (float64, error)
+}
+
+// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured
+// for the max backoff.
+func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff {
+ return &ExponentialJitterBackoff{
+ maxBackoff: maxBackoff,
+ maxBackoffAttempts: math.Log2(
+ float64(maxBackoff) / float64(time.Second)),
+ randFloat64: rand.CryptoRandFloat64,
+ }
+}
+
+// BackoffDelay returns the duration to wait before the next attempt should be
+// made. Returns an error if unable to get a duration.
+func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) {
+ if attempt > int(j.maxBackoffAttempts) {
+ return j.maxBackoff, nil
+ }
+
+ b, err := j.randFloat64()
+ if err != nil {
+ return 0, err
+ }
+
+ // [0.0, 1.0) * 2 ^ attempts
+ ri := int64(1 << uint64(attempt))
+ delaySeconds := b * float64(ri)
+
+ return timeconv.FloatSecondsDur(delaySeconds), nil
+}
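
For a 20s cap, maxBackoffAttempts is log2(20) ≈ 4.32, so attempt n draws uniformly from [0, 2^n) seconds through attempt 4 and is pinned to the cap from attempt 5 on. A sketch:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	backoff := retry.NewExponentialJitterBackoff(20 * time.Second)

	for attempt := 1; attempt <= 5; attempt++ {
		d, err := backoff.BackoffDelay(attempt, nil)
		if err != nil {
			panic(err)
		}
		// attempt 1 < 2s, 2 < 4s, 3 < 8s, 4 < 16s, 5 == 20s (capped)
		fmt.Printf("attempt %d: %v\n", attempt, d)
	}
}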
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
new file mode 100644
index 000000000..7a3f18301
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+ awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// attemptResultsKey is a metadata accessor key to retrieve metadata
+// for all request attempts.
+type attemptResultsKey struct {
+}
+
+// GetAttemptResults retrieves attempt results from middleware metadata.
+func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) {
+ m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults)
+ return m, ok
+}
+
+// AttemptResults represents a struct containing metadata returned by all request attempts.
+type AttemptResults struct {
+
+	// Results is a slice consisting of attempt results from all request attempts.
+	// Results are stored in the order the request attempts were made.
+ Results []AttemptResult
+}
+
+// AttemptResult represents attempt result returned by a single request attempt.
+type AttemptResult struct {
+
+ // Err is the error if received for the request attempt.
+ Err error
+
+ // Retryable denotes if request may be retried. This states if an
+ // error is considered retryable.
+ Retryable bool
+
+ // Retried indicates if this request was retried.
+ Retried bool
+
+ // ResponseMetadata is any existing metadata passed via the response middlewares.
+ ResponseMetadata middleware.Metadata
+}
+
+// addAttemptResults adds attempt results to middleware metadata
+func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) {
+ metadata.Set(attemptResultsKey{}, v)
+}
+
+// GetRawResponse returns raw response recorded for the attempt result
+func (a AttemptResult) GetRawResponse() interface{} {
+ return awsmiddle.GetRawResponse(a.ResponseMetadata)
+}
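
A sketch of how application code would read these results; in practice the middleware.Metadata comes from an operation output's ResultMetadata field, while the empty metadata here simply exercises the ok=false path:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/smithy-go/middleware"
)

func inspect(md middleware.Metadata) {
	results, ok := retry.GetAttemptResults(md)
	if !ok {
		return // the retry middleware recorded no attempt metadata
	}
	for i, r := range results.Results {
		fmt.Printf("attempt %d: retryable=%v retried=%v err=%v\n",
			i+1, r.Retryable, r.Retried, r.Err)
	}
}

func main() {
	inspect(middleware.Metadata{})
}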
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
new file mode 100644
index 000000000..5549922ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -0,0 +1,418 @@
+package retry
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/smithy-go"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ smithymiddle "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ "github.com/aws/smithy-go/transport/http"
+)
+
+// RequestCloner is a function that can take an input request type and clone
+// the request for use in a subsequent retry attempt.
+type RequestCloner func(interface{}) interface{}
+
+type retryMetadata struct {
+ AttemptNum int
+ AttemptTime time.Time
+ MaxAttempts int
+ AttemptClockSkew time.Duration
+}
+
+// Attempt is a Smithy Finalize middleware that handles retry attempts using
+// the provided Retryer implementation.
+type Attempt struct {
+ // Enable the logging of retry attempts performed by the SDK. This will
+ // include logging retry attempts, unretryable errors, and when max
+ // attempts are reached.
+ LogAttempts bool
+
+ // A Meter instance for recording retry-related metrics.
+ OperationMeter metrics.Meter
+
+ retryer aws.RetryerV2
+ requestCloner RequestCloner
+}
+
+// define the threshold at which we will consider certain kinds of errors to
+// be probably caused by clock skew
+const skewThreshold = 4 * time.Minute
+
+// NewAttemptMiddleware returns a new Attempt retry middleware.
+func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
+ m := &Attempt{
+ retryer: wrapAsRetryerV2(retryer),
+ requestCloner: requestCloner,
+ }
+ for _, fn := range optFns {
+ fn(m)
+ }
+ if m.OperationMeter == nil {
+ m.OperationMeter = metrics.NopMeterProvider{}.Meter("")
+ }
+
+ return m
+}
+
+// ID returns the middleware identifier
+func (r *Attempt) ID() string { return "Retry" }
+
+func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) {
+ if !r.LogAttempts {
+ return
+ }
+ logger.Logf(classification, format, v...)
+}
+
+// HandleFinalize utilizes the provided Retryer implementation to attempt
+// retries over the next handler
+func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+ out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+ var attemptNum int
+ var attemptClockSkew time.Duration
+ var attemptResults AttemptResults
+
+ maxAttempts := r.retryer.MaxAttempts()
+ releaseRetryToken := nopRelease
+
+ retryMetrics, err := newAttemptMetrics(r.OperationMeter)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ for {
+ attemptNum++
+ attemptInput := in
+ attemptInput.Request = r.requestCloner(attemptInput.Request)
+
+		// Record the metadata for the attempt being started.
+ attemptCtx := setRetryMetadata(ctx, retryMetadata{
+ AttemptNum: attemptNum,
+ AttemptTime: sdk.NowTime().UTC(),
+ MaxAttempts: maxAttempts,
+ AttemptClockSkew: attemptClockSkew,
+ })
+
+		// Set the clock skew to be used in other contexts (like signing)
+ ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew)
+
+ var attemptResult AttemptResult
+
+ attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) {
+ o.Properties.Set("operation.attempt", attemptNum)
+ })
+ retryMetrics.Attempts.Add(ctx, 1, withOperationMetadata(ctx))
+
+ start := sdk.NowTime()
+ out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
+ elapsed := sdk.NowTime().Sub(start)
+
+ retryMetrics.AttemptDuration.Record(ctx, float64(elapsed)/1e9, withOperationMetadata(ctx))
+ if err != nil {
+ retryMetrics.Errors.Add(ctx, 1, withOperationMetadata(ctx), func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("exception.type", errorType(err))
+ })
+ }
+
+ span.End()
+
+ attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
+
+ // AttemptResult Retried states that the attempt was not successful, and
+ // should be retried.
+ shouldRetry := attemptResult.Retried
+
+ // Add attempt metadata to list of all attempt metadata
+ attemptResults.Results = append(attemptResults.Results, attemptResult)
+
+ if !shouldRetry {
+			// Ensure the last response's metadata is used as the basis for result
+ // metadata returned by the stack. The Slice of attempt results
+ // will be added to this cloned metadata.
+ metadata = attemptResult.ResponseMetadata.Clone()
+
+ break
+ }
+ }
+
+ addAttemptResults(&metadata, attemptResults)
+ return out, metadata, err
+}
+
+// handleAttempt handles an individual request attempt.
+func (r *Attempt) handleAttempt(
+ ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler,
+) (
+ out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error,
+) {
+ defer func() {
+ attemptResult.Err = err
+ }()
+
+	// Short circuit if this attempt can never succeed because the context is
+ // canceled. This reduces the chance of token pools being modified for
+ // attempts that will not be made
+ select {
+ case <-ctx.Done():
+ return out, attemptResult, nopRelease, ctx.Err()
+ default:
+ }
+
+ //------------------------------
+ // Get Attempt Token
+ //------------------------------
+ releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx)
+ if err != nil {
+ return out, attemptResult, nopRelease, fmt.Errorf(
+ "failed to get retry Send token, %w", err)
+ }
+
+ //------------------------------
+ // Send Attempt
+ //------------------------------
+ logger := smithymiddle.GetLogger(ctx)
+ service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx)
+ retryMetadata, _ := getRetryMetadata(ctx)
+ attemptNum := retryMetadata.AttemptNum
+ maxAttempts := retryMetadata.MaxAttempts
+
+ // Following attempts must ensure the request payload stream starts in a
+ // rewound state.
+ if attemptNum > 1 {
+ if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok {
+ if rewindErr := rewindable.RewindStream(); rewindErr != nil {
+ return out, attemptResult, nopRelease, fmt.Errorf(
+ "failed to rewind transport stream for retry, %w", rewindErr)
+ }
+ }
+
+ r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d",
+ service, operation, attemptNum)
+ }
+
+ var metadata smithymiddle.Metadata
+ out, metadata, err = next.HandleFinalize(ctx, in)
+ attemptResult.ResponseMetadata = metadata
+
+ //------------------------------
+ // Bookkeeping
+ //------------------------------
+ // Release the retry token based on the state of the attempt's error (if any).
+ if releaseError := releaseRetryToken(err); releaseError != nil && err != nil {
+ return out, attemptResult, nopRelease, fmt.Errorf(
+ "failed to release retry token after request error, %w", err)
+ }
+ // Release the attempt token based on the state of the attempt's error (if any).
+ if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
+ return out, attemptResult, nopRelease, fmt.Errorf(
+ "failed to release initial token after request error, %w", err)
+ }
+ // If there was no error making the attempt, nothing further to do. There
+ // will be nothing to retry.
+ if err == nil {
+ return out, attemptResult, nopRelease, err
+ }
+
+ err = wrapAsClockSkew(ctx, err)
+
+ //------------------------------
+ // Is Retryable and Should Retry
+ //------------------------------
+ // If the attempt failed with an unretryable error, nothing further to do
+ // but return, and inform the caller about the terminal failure.
+ retryable := r.retryer.IsErrorRetryable(err)
+ if !retryable {
+ r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
+ return out, attemptResult, nopRelease, err
+ }
+
+ // set retryable to true
+ attemptResult.Retryable = true
+
+ // Once the maximum number of attempts have been exhausted there is nothing
+ // further to do other than inform the caller about the terminal failure.
+ if maxAttempts > 0 && attemptNum >= maxAttempts {
+ r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
+ err = &MaxAttemptsError{
+ Attempt: attemptNum,
+ Err: err,
+ }
+ return out, attemptResult, nopRelease, err
+ }
+
+ //------------------------------
+ // Get Retry (aka Retry Quota) Token
+ //------------------------------
+	// Get a retry token that will be released based on the result of the next attempt.
+ releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
+ if retryTokenErr != nil {
+ return out, attemptResult, nopRelease, errors.Join(err, retryTokenErr)
+ }
+
+ //------------------------------
+ // Retry Delay and Sleep
+ //------------------------------
+ // Get the retry delay before another attempt can be made, and sleep for
+	// that time. Potentially exit early if the sleep is canceled via the
+ // context.
+ retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+ if reqErr != nil {
+ return out, attemptResult, releaseRetryToken, reqErr
+ }
+ if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
+ err = &aws.RequestCanceledError{Err: reqErr}
+ return out, attemptResult, releaseRetryToken, err
+ }
+
+ // The request should be re-attempted.
+ attemptResult.Retried = true
+
+ return out, attemptResult, releaseRetryToken, err
+}
+
+// errors that, if detected when we know there's a clock skew,
+// can be retried and have a high chance of success
+var possibleSkewCodes = map[string]struct{}{
+ "InvalidSignatureException": {},
+ "SignatureDoesNotMatch": {},
+ "AuthFailure": {},
+}
+
+var definiteSkewCodes = map[string]struct{}{
+ "RequestExpired": {},
+ "RequestInTheFuture": {},
+ "RequestTimeTooSkewed": {},
+}
+
+// wrapAsClockSkew checks if this error could be related to a clock skew
+// error and if so, wrap the error.
+func wrapAsClockSkew(ctx context.Context, err error) error {
+ var v interface{ ErrorCode() string }
+ if !errors.As(err, &v) {
+ return err
+ }
+ if _, ok := definiteSkewCodes[v.ErrorCode()]; ok {
+ return &retryableClockSkewError{Err: err}
+ }
+ _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()]
+ if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode {
+ return &retryableClockSkewError{Err: err}
+ }
+ return err
+}
+
+// MetricsHeader attaches SDK request metric header for retries to the transport
+type MetricsHeader struct{}
+
+// ID returns the middleware identifier
+func (r *MetricsHeader) ID() string {
+ return "RetryMetricsHeader"
+}
+
+// HandleFinalize attaches the SDK request metric header to the transport layer
+func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+ out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+ retryMetadata, _ := getRetryMetadata(ctx)
+
+ const retryMetricHeader = "Amz-Sdk-Request"
+ var parts []string
+
+ parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
+ if retryMetadata.MaxAttempts != 0 {
+ parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
+ }
+
+ var ttl time.Time
+ if deadline, ok := ctx.Deadline(); ok {
+ ttl = deadline
+ }
+
+ // Only append the TTL if it can be determined.
+ if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 {
+ const unixTimeFormat = "20060102T150405Z"
+ ttl = ttl.Add(retryMetadata.AttemptClockSkew)
+ parts = append(parts, "ttl="+ttl.Format(unixTimeFormat))
+ }
+
+ switch req := in.Request.(type) {
+ case *http.Request:
+ req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; "))
+ default:
+ return out, metadata, fmt.Errorf("unknown transport type %T", req)
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+type retryMetadataKey struct{}
+
+// getRetryMetadata retrieves retryMetadata from the context and a bool
+// indicating if it was set.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
+ metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
+ return metadata, ok
+}
+
+// setRetryMetadata sets the retryMetadata on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context {
+ return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata)
+}
+
+// AddRetryMiddlewaresOptions is the set of options that can be passed to
+// AddRetryMiddlewares for configuring retry associated middleware.
+type AddRetryMiddlewaresOptions struct {
+ Retryer aws.Retryer
+
+ // Enable the logging of retry attempts performed by the SDK. This will
+ // include logging retry attempts, unretryable errors, and when max
+ // attempts are reached.
+ LogRetryAttempts bool
+}
+
+// AddRetryMiddlewares adds retry middleware to operation middleware stack
+func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error {
+ attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) {
+ middleware.LogAttempts = options.LogRetryAttempts
+ })
+
+	// insert the retry middleware before signing, if signing exists
+ if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil {
+ return err
+ }
+
+ if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Determines the value of exception.type for metrics purposes. We prefer an
+// API-specific error code, otherwise it's just the Go type for the value.
+func errorType(err error) string {
+ var terr smithy.APIError
+ if errors.As(err, &terr) {
+ return terr.ErrorCode()
+ }
+ return fmt.Sprintf("%T", err)
+}
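
A sketch of the manual wiring path through aws.Config.APIOptions; SDK-generated clients normally install these middleware themselves, so this is only illustrative:

package main

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	smithymiddle "github.com/aws/smithy-go/middleware"
)

func main() {
	cfg := aws.Config{}
	cfg.APIOptions = append(cfg.APIOptions, func(stack *smithymiddle.Stack) error {
		return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
			Retryer:          retry.NewStandard(),
			LogRetryAttempts: true,
		})
	})
	_ = cfg
}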
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go
new file mode 100644
index 000000000..af81635b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go
@@ -0,0 +1,90 @@
+package retry
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// AddWithErrorCodes returns a Retryer with additional error codes considered
+// for determining if the error should be retried.
+func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer {
+ retryable := &RetryableErrorCode{
+ Codes: map[string]struct{}{},
+ }
+ for _, c := range codes {
+ retryable.Codes[c] = struct{}{}
+ }
+
+ return &withIsErrorRetryable{
+ RetryerV2: wrapAsRetryerV2(r),
+ Retryable: retryable,
+ }
+}
+
+type withIsErrorRetryable struct {
+ aws.RetryerV2
+ Retryable IsErrorRetryable
+}
+
+func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool {
+ if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary {
+ return v.Bool()
+ }
+ return r.RetryerV2.IsErrorRetryable(err)
+}
+
+// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value
+// specified.
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+ return &withMaxAttempts{
+ RetryerV2: wrapAsRetryerV2(r),
+ Max: max,
+ }
+}
+
+type withMaxAttempts struct {
+ aws.RetryerV2
+ Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+ return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer,
+// overriding the RetryDelay behavior with an alternate maximum backoff
+// delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+ return &withMaxBackoffDelay{
+ RetryerV2: wrapAsRetryerV2(r),
+ backoff: NewExponentialJitterBackoff(delay),
+ }
+}
+
+type withMaxBackoffDelay struct {
+ aws.RetryerV2
+ backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+ return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+ aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+ v, ok := r.(aws.RetryerV2)
+ if !ok {
+ v = wrappedAsRetryerV2{Retryer: r}
+ }
+
+ return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+ return w.Retryer.GetInitialToken(), nil
+}
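
Because each wrapper embeds the previous retryer as a RetryerV2 and overrides a single method, the helpers compose; a sketch:

package main

import (
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

func main() {
	var r aws.Retryer = retry.NewStandard()
	r = retry.AddWithMaxAttempts(r, 7)                  // overrides MaxAttempts only
	r = retry.AddWithMaxBackoffDelay(r, 30*time.Second) // overrides RetryDelay only
	_ = r
}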
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 000000000..1b485f998
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,228 @@
+package retry
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// an error as the result of an operation is retryable.
+type IsErrorRetryable interface {
+ IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+ for _, re := range r {
+ if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+ return v
+ }
+ }
+ return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+ return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error is
+// retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns if the error is retryable if it satisfies the
+// Retryable interface, and returns if the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ RetryableError() bool }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// reports that it should not be retried if so.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns the error is not retryable if the request was
+// canceled.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ CanceledError() bool }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ if v.CanceledError() {
+ return aws.FalseTernary
+ }
+ return aws.UnknownTernary
+}
+
+// RetryableConnectionError determines if the underlying error is an HTTP
+// connection error and returns if it should be retried.
+//
+// Includes errors such as connection reset, connection refused, net dial,
+// temporary, and timeout errors.
+type RetryableConnectionError struct{}
+
+// IsErrorRetryable returns if the error is caused by an HTTP connection
+// error, and should be retried.
+func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
+ if err == nil {
+ return aws.UnknownTernary
+ }
+ var retryable bool
+
+ var conErr interface{ ConnectionError() bool }
+ var tempErr interface{ Temporary() bool }
+ var timeoutErr interface{ Timeout() bool }
+ var urlErr *url.Error
+ var netOpErr *net.OpError
+ var dnsError *net.DNSError
+
+ if errors.As(err, &dnsError) {
+ // NXDOMAIN errors should not be retried
+ if dnsError.IsNotFound {
+ return aws.BoolTernary(false)
+ }
+
+ // if !dnsError.Temporary(), error may or may not be temporary,
+ // (i.e. !Temporary() =/=> !retryable) so we should fall through to
+ // remaining checks
+ if dnsError.Temporary() {
+ return aws.BoolTernary(true)
+ }
+ }
+
+ switch {
+ case errors.As(err, &conErr) && conErr.ConnectionError():
+ retryable = true
+
+ case strings.Contains(err.Error(), "use of closed network connection"):
+ fallthrough
+ case strings.Contains(err.Error(), "connection reset"):
+ // The errors "connection reset" and "use of closed network connection"
+ // are effectively the same. It appears to be the difference between
+ // sync and async read of TCP RST in the stdlib's net.Conn read loop.
+ // see #2737
+ retryable = true
+
+ case errors.As(err, &urlErr):
+ // Refused connections should be retried as the service may not yet be
+ // running on the port. Go TCP dial considers refused connections as
+ // not temporary.
+ if strings.Contains(urlErr.Error(), "connection refused") {
+ retryable = true
+ } else {
+ return r.IsErrorRetryable(errors.Unwrap(urlErr))
+ }
+
+ case errors.As(err, &netOpErr):
+ // Network dial, or temporary network errors are always retryable.
+ if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() {
+ retryable = true
+ } else {
+ return r.IsErrorRetryable(errors.Unwrap(netOpErr))
+ }
+
+ case errors.As(err, &tempErr) && tempErr.Temporary():
+ // Fallback to the generic temporary check, with temporary errors
+ // retryable.
+ retryable = true
+
+ case errors.As(err, &timeoutErr) && timeoutErr.Timeout():
+ // Fallback to the generic timeout check, with timeout errors
+ // retryable.
+ retryable = true
+
+ default:
+ return aws.UnknownTernary
+ }
+
+ return aws.BoolTernary(retryable)
+
+}
+
+// RetryableHTTPStatusCode provides an IsErrorRetryable implementation based
+// on HTTP status codes.
+type RetryableHTTPStatusCode struct {
+ Codes map[int]struct{}
+}
+
+// IsErrorRetryable returns if the passed in error is retryable based on the
+// HTTP status code.
+func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ HTTPStatusCode() int }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ _, ok := r.Codes[v.HTTPStatusCode()]
+ if !ok {
+ return aws.UnknownTernary
+ }
+
+ return aws.TrueTernary
+}
+
+// RetryableErrorCode determines if an attempt should be retried based on the
+// API error code.
+type RetryableErrorCode struct {
+ Codes map[string]struct{}
+}
+
+// IsErrorRetryable returns if the error is retryable based on the error codes.
+// Returns unknown if the error doesn't have a code or it is unknown.
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
+ var v interface{ ErrorCode() string }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ _, ok := r.Codes[v.ErrorCode()]
+ if !ok {
+ return aws.UnknownTernary
+ }
+
+ return aws.TrueTernary
+}
+
+// retryableClockSkewError marks errors that can be caused by clock skew
+// (difference between server time and client time).
+// This is returned when there's certain confidence that adjusting the client time
+// could allow a retry to succeed
+type retryableClockSkewError struct{ Err error }
+
+func (e *retryableClockSkewError) Error() string {
+ return fmt.Sprintf("Probable clock skew error: %v", e.Err)
+}
+
+// Unwrap returns the wrapped error.
+func (e *retryableClockSkewError) Unwrap() error {
+ return e.Err
+}
+
+// RetryableError allows the retryer to retry this request
+func (e *retryableClockSkewError) RetryableError() bool {
+ return true
+}
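
A sketch of appending an application-specific check through IsErrorRetryableFunc; errTransient is a hypothetical sentinel, and returning UnknownTernary defers to the remaining default checks:

package main

import (
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
)

var errTransient = errors.New("transient store error")

func main() {
	_ = retry.NewStandard(func(o *retry.StandardOptions) {
		o.Retryables = append(o.Retryables, retry.IsErrorRetryableFunc(
			func(err error) aws.Ternary {
				if errors.Is(err, errTransient) {
					return aws.TrueTernary
				}
				return aws.UnknownTernary // defer to the other checks
			}))
	})
}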
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
new file mode 100644
index 000000000..d5ea93222
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -0,0 +1,269 @@
+package retry
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws/ratelimit"
+)
+
+// BackoffDelayer provides the interface for determining the delay before
+// retrying a request attempt that previously failed.
+type BackoffDelayer interface {
+ BackoffDelay(attempt int, err error) (time.Duration, error)
+}
+
+// BackoffDelayerFunc provides a wrapper around a function to determine the
+// backoff delay of an attempt retry.
+type BackoffDelayerFunc func(int, error) (time.Duration, error)
+
+// BackoffDelay returns the delay before attempting to retry a request.
+func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
+ return fn(attempt, err)
+}
+
+const (
+	// DefaultMaxAttempts is the maximum number of attempts for an API request
+ DefaultMaxAttempts int = 3
+
+	// DefaultMaxBackoff is the maximum backoff delay between retry attempts
+ DefaultMaxBackoff time.Duration = 20 * time.Second
+)
+
+// Default retry token quota values.
+const (
+ DefaultRetryRateTokens uint = 500
+ DefaultRetryCost uint = 5
+ DefaultRetryTimeoutCost uint = 10
+ DefaultNoRetryIncrement uint = 1
+)
+
+// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK
+// should consider as retryable errors.
+var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
+ 500: {},
+ 502: {},
+ 503: {},
+ 504: {},
+}
+
+// DefaultRetryableErrorCodes provides the set of API error codes that should
+// be retried.
+var DefaultRetryableErrorCodes = map[string]struct{}{
+ "RequestTimeout": {},
+ "RequestTimeoutException": {},
+}
+
+// DefaultThrottleErrorCodes provides the set of API error codes that are
+// considered throttle errors.
+var DefaultThrottleErrorCodes = map[string]struct{}{
+ "Throttling": {},
+ "ThrottlingException": {},
+ "ThrottledException": {},
+ "RequestThrottledException": {},
+ "TooManyRequestsException": {},
+ "ProvisionedThroughputExceededException": {},
+ "TransactionInProgressException": {},
+ "RequestLimitExceeded": {},
+ "BandwidthLimitExceeded": {},
+ "LimitExceededException": {},
+ "RequestThrottled": {},
+ "SlowDown": {},
+ "PriorRequestNotComplete": {},
+ "EC2ThrottledException": {},
+}
+
+// DefaultRetryables provides the set of retryable checks that are used by
+// default.
+var DefaultRetryables = []IsErrorRetryable{
+ NoRetryCanceledError{},
+ RetryableError{},
+ RetryableConnectionError{},
+ RetryableHTTPStatusCode{
+ Codes: DefaultRetryableHTTPStatusCodes,
+ },
+ RetryableErrorCode{
+ Codes: DefaultRetryableErrorCodes,
+ },
+ RetryableErrorCode{
+ Codes: DefaultThrottleErrorCodes,
+ },
+}
+
+// DefaultTimeouts provides the set of timeout checks that are used by default.
+var DefaultTimeouts = []IsErrorTimeout{
+ TimeouterError{},
+}
+
+// StandardOptions provides the functional options for configuring the standard
+// retryable, and delay behavior.
+type StandardOptions struct {
+ // Maximum number of attempts that should be made.
+ MaxAttempts int
+
+ // MaxBackoff duration between retried attempts.
+ MaxBackoff time.Duration
+
+ // Provides the backoff strategy the retryer will use to determine the
+ // delay between retry attempts.
+ Backoff BackoffDelayer
+
+ // Set of strategies to determine if the attempt should be retried based on
+ // the error response received.
+ //
+ // It is safe to append to this list in NewStandard's functional options.
+ Retryables []IsErrorRetryable
+
+ // Set of strategies to determine if the attempt failed due to a timeout
+ // error.
+ //
+ // It is safe to append to this list in NewStandard's functional options.
+ Timeouts []IsErrorTimeout
+
+ // Provides the rate limiting strategy for rate limiting attempt retries
+ // across all attempts the retryer is being used with.
+ //
+	// A RateLimiter operates as a token bucket with a set capacity, where
+	// attempt failure events consume tokens. A retry attempt that attempts to
+	// consume more tokens than what's available results in operation failure.
+	// The default implementation is parameterized as follows:
+	// - a capacity of 500 (DefaultRetryRateTokens)
+	// - a retry caused by a timeout costs 10 tokens (DefaultRetryTimeoutCost)
+	// - a retry caused by other errors costs 5 tokens (DefaultRetryCost)
+	// - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement)
+ //
+ // You can disable rate limiting by setting this field to ratelimit.None.
+ RateLimiter RateLimiter
+
+ // The cost to deduct from the RateLimiter's token bucket per retry.
+ RetryCost uint
+
+ // The cost to deduct from the RateLimiter's token bucket per retry caused
+ // by timeout error.
+ RetryTimeoutCost uint
+
+ // The cost to payback to the RateLimiter's token bucket for successful
+ // attempts.
+ NoRetryIncrement uint
+}
+
+// RateLimiter provides the interface for limiting the rate of attempt retries
+// allowed by the retryer.
+type RateLimiter interface {
+ GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
+ AddTokens(uint) error
+}
+
+// Standard is the standard retry pattern for the SDK. It uses a set of
+// retryable checks to determine if the failed attempt should be retried, and
+// what retry delay should be used.
+type Standard struct {
+ options StandardOptions
+
+ timeout IsErrorTimeout
+ retryable IsErrorRetryable
+ backoff BackoffDelayer
+}
+
+// NewStandard initializes a standard retry behavior with defaults that can be
+// overridden via functional options.
+func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
+ o := StandardOptions{
+ MaxAttempts: DefaultMaxAttempts,
+ MaxBackoff: DefaultMaxBackoff,
+ Retryables: append([]IsErrorRetryable{}, DefaultRetryables...),
+ Timeouts: append([]IsErrorTimeout{}, DefaultTimeouts...),
+
+ RateLimiter: ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
+ RetryCost: DefaultRetryCost,
+ RetryTimeoutCost: DefaultRetryTimeoutCost,
+ NoRetryIncrement: DefaultNoRetryIncrement,
+ }
+ for _, fn := range fnOpts {
+ fn(&o)
+ }
+ if o.MaxAttempts <= 0 {
+ o.MaxAttempts = DefaultMaxAttempts
+ }
+
+ backoff := o.Backoff
+ if backoff == nil {
+ backoff = NewExponentialJitterBackoff(o.MaxBackoff)
+ }
+
+ return &Standard{
+ options: o,
+ backoff: backoff,
+ retryable: IsErrorRetryables(o.Retryables),
+ timeout: IsErrorTimeouts(o.Timeouts),
+ }
+}
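+
+// A minimal usage sketch: construct a Standard retryer with custom options.
+// The error code "MyServiceUnavailable" is a hypothetical, service-specific
+// code used only for illustration.
+//
+//	standard := NewStandard(func(o *StandardOptions) {
+//		o.MaxAttempts = 5
+//		// Disable client-side retry rate limiting entirely.
+//		o.RateLimiter = ratelimit.None
+//		// Appending to o.Retryables is safe within NewStandard's options.
+//		o.Retryables = append(o.Retryables, RetryableErrorCode{
+//			Codes: map[string]struct{}{"MyServiceUnavailable": {}},
+//		})
+//	})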
+
+// MaxAttempts returns the maximum number of attempts that can be made for a
+// request before failing.
+func (s *Standard) MaxAttempts() int {
+ return s.options.MaxAttempts
+}
+
+// IsErrorRetryable returns whether the error can be retried. It does not
+// consider the number of attempts made.
+func (s *Standard) IsErrorRetryable(err error) bool {
+ return s.retryable.IsErrorRetryable(err).Bool()
+}
+
+// RetryDelay returns the delay to use before another request attempt is made.
+func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
+ return s.backoff.BackoffDelay(attempt, err)
+}
+
+// GetAttemptToken returns the token to be released after the attempt completes.
+// The release token will add NoRetryIncrement to the RateLimiter token pool if
+// the attempt was successful. If the attempt failed, nothing will be done.
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) {
+ return s.GetInitialToken(), nil
+}
+
+// GetInitialToken returns a token for adding the NoRetryIncrement to the
+// RateLimiter token pool if the attempt completed successfully without error.
+//
+// InitialToken applies to the result of each attempt, including the first,
+// whereas the RetryToken applies to the result of subsequent attempts.
+//
+// Deprecated: use GetAttemptToken instead.
+func (s *Standard) GetInitialToken() func(error) error {
+ return releaseToken(s.noRetryIncrement).release
+}
+
+func (s *Standard) noRetryIncrement() error {
+ return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool,
+// returning the token release function or an error.
+func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) {
+ cost := s.options.RetryCost
+
+ if s.timeout.IsErrorTimeout(opErr).Bool() {
+ cost = s.options.RetryTimeoutCost
+ }
+
+ fn, err := s.options.RateLimiter.GetToken(ctx, cost)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get rate limit token, %w", err)
+ }
+
+ return releaseToken(fn).release, nil
+}
+
+func nopRelease(error) error { return nil }
+
+type releaseToken func() error
+
+func (f releaseToken) release(err error) error {
+ if err != nil {
+ return nil
+ }
+
+ return f()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
new file mode 100644
index 000000000..c4b844d15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
@@ -0,0 +1,60 @@
+package retry
+
+import (
+ "errors"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorThrottle provides the interface of an implementation to determine if
+// an error response from an operation is a throttling error.
+type IsErrorThrottle interface {
+ IsErrorThrottle(error) aws.Ternary
+}
+
+// IsErrorThrottles is a collection of checks to determine if the error is a
+// throttle error. Iterates through the checks and returns the state of
+// throttle if any check returns something other than unknown.
+type IsErrorThrottles []IsErrorThrottle
+
+// IsErrorThrottle returns whether the error is a throttle error if any of the
+// checks in the list return a value other than unknown.
+func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary {
+ for _, re := range r {
+ if v := re.IsErrorThrottle(err); v != aws.UnknownTernary {
+ return v
+ }
+ }
+ return aws.UnknownTernary
+}
+
+// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface.
+type IsErrorThrottleFunc func(error) aws.Ternary
+
+// IsErrorThrottle returns whether the error is a throttle error.
+func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary {
+ return fn(err)
+}
+
+// ThrottleErrorCode determines if an attempt should be retried based on the
+// API error code.
+type ThrottleErrorCode struct {
+ Codes map[string]struct{}
+}
+
+// IsErrorThrottle returns whether the error is a throttle error based on the
+// error codes. Returns unknown if the error doesn't have a code or the code
+// is not in the set.
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
+ var v interface{ ErrorCode() string }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ _, ok := r.Codes[v.ErrorCode()]
+ if !ok {
+ return aws.UnknownTernary
+ }
+
+ return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
new file mode 100644
index 000000000..3d47870d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+ "errors"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface of an implementation to determine if
+// an error is a timeout error.
+type IsErrorTimeout interface {
+ IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine if the error is a
+// timeout. Iterates through the checks and returns the state of timeout if
+// any check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns whether the error is a timeout if any of the checks
+// in the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+ for _, t := range ts {
+ if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+ return v
+ }
+ }
+ return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns whether the error is a timeout.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+ return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining
+// if an error is a timeout based on whether its type implements the Timeout
+// method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns if the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+ var v interface{ Timeout() bool }
+
+ if !errors.As(err, &v) {
+ return aws.UnknownTernary
+ }
+
+ return aws.BoolTernary(v.Timeout())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 000000000..b0ba4cb2f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+ "context"
+ "fmt"
+ "time"
+)
+
+// RetryMode specifies the retry behavior the API client will use when
+// creating a retryer.
+type RetryMode string
+
+const (
+ // RetryModeStandard model provides rate limited retry attempts with
+ // exponential backoff delay.
+ RetryModeStandard RetryMode = "standard"
+
+ // RetryModeAdaptive model provides attempt send rate limiting on throttle
+ // responses in addition to standard mode's retry rate limiting.
+ //
+ // Adaptive retry mode is experimental and is subject to change in the
+ // future.
+ RetryModeAdaptive RetryMode = "adaptive"
+)
+
+// ParseRetryMode attempts to parse a RetryMode from the given string,
+// returning an error if the value is not a known RetryMode.
+func ParseRetryMode(v string) (mode RetryMode, err error) {
+ switch v {
+ case "standard":
+ return RetryModeStandard, nil
+ case "adaptive":
+ return RetryModeAdaptive, nil
+ default:
+ return mode, fmt.Errorf("unknown RetryMode, %v", v)
+ }
+}
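+
+// For example, a caller resolving the mode from configuration might write:
+//
+//	mode, err := ParseRetryMode("adaptive")
+//	if err != nil {
+//		mode = RetryModeStandard // fall back to the default
+//	}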
+
+func (m RetryMode) String() string { return string(m) }
+
+// Retryer is an interface to determine if a given error from an attempt
+// should be retried, and if so what backoff delay to apply. The default
+// implementation used by most services is the retry package's Standard type,
+// which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ // IsErrorRetryable returns if the failed attempt is retryable. This check
+ // should determine if the error can be retried, or if the error is
+ // terminal.
+ IsErrorRetryable(error) bool
+
+	// MaxAttempts returns the maximum number of attempts that can be made for
+	// a request before failing. A value of 0 implies that the request should
+	// be retried until it succeeds if the errors are retryable.
+ MaxAttempts() int
+
+ // RetryDelay returns the delay that should be used before retrying the
+ // attempt. Will return error if the delay could not be determined.
+ RetryDelay(attempt int, opErr error) (time.Duration, error)
+
+	// GetRetryToken attempts to deduct the retry cost from the retry token
+	// pool, returning the token release function or an error.
+ GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
+
+ // GetInitialToken returns the initial attempt token that can increment the
+ // retry token pool if the attempt is successful.
+ GetInitialToken() (releaseToken func(error) error)
+}
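+
+// A short sketch of supplying a custom Retryer through client configuration,
+// assuming the retry package's Standard implementation:
+//
+//	cfg := aws.Config{
+//		Retryer: func() aws.Retryer { return retry.NewStandard() },
+//	}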
+
+// RetryerV2 is an interface to determine if a given error from an attempt
+// should be retried, and if so what backoff delay to apply. The default
+// implementation used by most services is the retry package's Standard type,
+// which contains basic retry logic using exponential backoff.
+//
+// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
+// method in favor of GetAttemptToken which takes a context, and can return an error.
+//
+// The SDK's retry package's Attempt middleware and utilities will always
+// wrap a Retryer as a RetryerV2, delegating to GetInitialToken only if
+// GetAttemptToken is not implemented.
+type RetryerV2 interface {
+ Retryer
+
+ // GetInitialToken returns the initial attempt token that can increment the
+ // retry token pool if the attempt is successful.
+ //
+ // Deprecated: This method does not provide a way to block using Context,
+ // nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
+ GetInitialToken() (releaseToken func(error) error)
+
+	// GetAttemptToken returns the send token that can be used to rate limit
+	// attempt calls. Will be used by the SDK's retry package's Attempt
+	// middleware to get a send token prior to making the attempt, and to
+	// release the send token after the attempt has been made.
+ GetAttemptToken(context.Context) (func(error) error, error)
+}
+
+// NopRetryer provides a Retryer implementation that will flag all attempt
+// errors as not retryable, with a max attempts of 1.
+type NopRetryer struct{}
+
+// IsErrorRetryable returns false for all error values.
+func (NopRetryer) IsErrorRetryable(error) bool { return false }
+
+// MaxAttempts always returns 1 for the original attempt.
+func (NopRetryer) MaxAttempts() int { return 1 }
+
+// RetryDelay is not valid for the NopRetryer. Will always return error.
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) {
+ return 0, fmt.Errorf("not retrying any attempt errors")
+}
+
+// GetRetryToken returns a stub function that does nothing.
+func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) {
+ return nopReleaseToken, nil
+}
+
+// GetInitialToken returns a stub function that does nothing.
+func (NopRetryer) GetInitialToken() func(error) error {
+ return nopReleaseToken
+}
+
+// GetAttemptToken returns a stub function that does nothing.
+func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) {
+ return nopReleaseToken, nil
+}
+
+func nopReleaseToken(error) error { return nil }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go
new file mode 100644
index 000000000..3af9b2b33
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go
@@ -0,0 +1,14 @@
+package aws
+
+// ExecutionEnvironmentID is the AWS execution environment runtime identifier.
+type ExecutionEnvironmentID string
+
+// RuntimeEnvironment is a collection of values that are determined at runtime
+// based on the environment that the SDK is executing in. Some of these values
+// may or may not be present based on the executing environment and certain SDK
+// configuration properties that drive whether these values are populated.
+type RuntimeEnvironment struct {
+ EnvironmentIdentifier ExecutionEnvironmentID
+ Region string
+ EC2InstanceMetadataRegion string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go
new file mode 100644
index 000000000..cbf22f1d0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go
@@ -0,0 +1,115 @@
+package v4
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+func lookupKey(service, region string) string {
+ var s strings.Builder
+ s.Grow(len(region) + len(service) + 3)
+ s.WriteString(region)
+ s.WriteRune('/')
+ s.WriteString(service)
+ return s.String()
+}
+
+type derivedKey struct {
+ AccessKey string
+ Date time.Time
+ Credential []byte
+}
+
+type derivedKeyCache struct {
+ values map[string]derivedKey
+ mutex sync.RWMutex
+}
+
+func newDerivedKeyCache() derivedKeyCache {
+ return derivedKeyCache{
+ values: make(map[string]derivedKey),
+ }
+}
+
+func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte {
+ key := lookupKey(service, region)
+ s.mutex.RLock()
+ if cred, ok := s.get(key, credentials, signingTime.Time); ok {
+ s.mutex.RUnlock()
+ return cred
+ }
+ s.mutex.RUnlock()
+
+ s.mutex.Lock()
+ if cred, ok := s.get(key, credentials, signingTime.Time); ok {
+ s.mutex.Unlock()
+ return cred
+ }
+ cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime)
+ entry := derivedKey{
+ AccessKey: credentials.AccessKeyID,
+ Date: signingTime.Time,
+ Credential: cred,
+ }
+ s.values[key] = entry
+ s.mutex.Unlock()
+
+ return cred
+}
+
+func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) {
+ cacheEntry, ok := s.retrieveFromCache(key)
+ if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) {
+ return cacheEntry.Credential, true
+ }
+ return nil, false
+}
+
+func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) {
+ if v, ok := s.values[key]; ok {
+ return v, true
+ }
+ return derivedKey{}, false
+}
+
+// SigningKeyDeriver derives a signing key from a set of credentials
+type SigningKeyDeriver struct {
+ cache derivedKeyCache
+}
+
+// NewSigningKeyDeriver returns a new SigningKeyDeriver
+func NewSigningKeyDeriver() *SigningKeyDeriver {
+ return &SigningKeyDeriver{
+ cache: newDerivedKeyCache(),
+ }
+}
+
+// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing.
+func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte {
+ return k.cache.Get(credential, service, region, signingTime)
+}
+
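+// deriveKey implements the SigV4 key derivation chain:
+//
+//	kDate    = HMAC-SHA256("AWS4"+secret, yyyymmdd)
+//	kRegion  = HMAC-SHA256(kDate, region)
+//	kService = HMAC-SHA256(kRegion, service)
+//	kSigning = HMAC-SHA256(kService, "aws4_request")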
+func deriveKey(secret, service, region string, t SigningTime) []byte {
+ hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat()))
+ hmacRegion := HMACSHA256(hmacDate, []byte(region))
+ hmacService := HMACSHA256(hmacRegion, []byte(service))
+ return HMACSHA256(hmacService, []byte("aws4_request"))
+}
+
+func isSameDay(x, y time.Time) bool {
+ xYear, xMonth, xDay := x.Date()
+ yYear, yMonth, yDay := y.Date()
+
+ if xYear != yYear {
+ return false
+ }
+
+ if xMonth != yMonth {
+ return false
+ }
+
+ return xDay == yDay
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go
new file mode 100644
index 000000000..a23cb003b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go
@@ -0,0 +1,40 @@
+package v4
+
+// Signature Version 4 (SigV4) Constants
+const (
+ // EmptyStringSHA256 is the hex encoded sha256 value of an empty string
+ EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+
+ // UnsignedPayload indicates that the request payload body is unsigned
+ UnsignedPayload = "UNSIGNED-PAYLOAD"
+
+ // AmzAlgorithmKey indicates the signing algorithm
+ AmzAlgorithmKey = "X-Amz-Algorithm"
+
+ // AmzSecurityTokenKey indicates the security token to be used with temporary credentials
+ AmzSecurityTokenKey = "X-Amz-Security-Token"
+
+ // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
+ AmzDateKey = "X-Amz-Date"
+
+ // AmzCredentialKey is the access key ID and credential scope
+ AmzCredentialKey = "X-Amz-Credential"
+
+ // AmzSignedHeadersKey is the set of headers signed for the request
+ AmzSignedHeadersKey = "X-Amz-SignedHeaders"
+
+ // AmzSignatureKey is the query parameter to store the SigV4 signature
+ AmzSignatureKey = "X-Amz-Signature"
+
+ // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
+ TimeFormat = "20060102T150405Z"
+
+	// ShortTimeFormat is the shortened time format used in the credential scope
+	ShortTimeFormat = "20060102"
+
+	// ContentSHAKey is the SHA256 of the request body
+	ContentSHAKey = "X-Amz-Content-Sha256"
+
+ // StreamingEventsPayload indicates that the request payload body is a signed event stream.
+ StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
new file mode 100644
index 000000000..c61955ad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule values needed for validation of a
+// string value
+type Rules []Rule
+
+// Rule interface allows for more flexible rules and simply checks whether a
+// value adheres to that Rule
+type Rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and report whether any rule applies
+// to the value; nested rules are supported
+func (r Rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// MapRule is a generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for MapRule reports whether the value exists in the map
+func (m MapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// AllowList is a generic Rule for include listing
+type AllowList struct {
+ Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+ return w.Rule.IsValid(value)
+}
+
+// ExcludeList is a generic Rule for exclude listing
+type ExcludeList struct {
+ Rule
+}
+
+// IsValid for ExcludeList returns true if the value is not matched by the embedded Rule
+func (b ExcludeList) IsValid(value string) bool {
+ return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and returns whether a match has
+// been found
+func (p Patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if sdkstrings.HasPrefixFold(value, pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// InclusiveRules rules allow for rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
new file mode 100644
index 000000000..d99b32ceb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -0,0 +1,70 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+ ExcludeList{
+ MapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ "Expect": struct{}{},
+ "Transfer-Encoding": struct{}{},
+ },
+ },
+}
+
+// RequiredSignedHeaders is an allow list for building canonical headers.
+var RequiredSignedHeaders = Rules{
+ AllowList{
+ MapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Context": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ },
+ },
+ Patterns{"X-Amz-Object-Lock-"},
+ Patterns{"X-Amz-Meta-"},
+}
+
+// AllowedQueryHoisting is an allow list for query hoisting: a header may be
+// hoisted to the query string if it matches the X-Amz- prefix and is not one
+// of the RequiredSignedHeaders.
+var AllowedQueryHoisting = InclusiveRules{
+ ExcludeList{RequiredSignedHeaders},
+ Patterns{"X-Amz-"},
+}
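+
+// For example, "X-Amz-Expires" may be hoisted to the query string, while
+// "X-Amz-Content-Sha256" may not, because it is a required signed header.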
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
new file mode 100644
index 000000000..e7fa7a1b1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
@@ -0,0 +1,13 @@
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+)
+
+// HMACSHA256 computes a HMAC-SHA256 of data given the provided key.
+func HMACSHA256(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
new file mode 100644
index 000000000..bf93659a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
@@ -0,0 +1,75 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SanitizeHostForHeader removes the default port from the host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
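+
+// For example, a request to "https://example.com:443/path" has its Host
+// rewritten to "example.com", while "https://example.com:8443/path" is left
+// unchanged.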
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go
new file mode 100644
index 000000000..fc7887909
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go
@@ -0,0 +1,13 @@
+package v4
+
+import "strings"
+
+// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope
+func BuildCredentialScope(signingTime SigningTime, region, service string) string {
+ return strings.Join([]string{
+ signingTime.ShortTimeFormat(),
+ region,
+ service,
+ "aws4_request",
+ }, "/")
+}
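+
+// For example, signing an S3 request in us-east-1 on 2006-01-02 produces the
+// credential scope "20060102/us-east-1/s3/aws4_request".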
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go
new file mode 100644
index 000000000..1de06a765
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go
@@ -0,0 +1,36 @@
+package v4
+
+import "time"
+
+// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing.
+type SigningTime struct {
+ time.Time
+ timeFormat string
+ shortTimeFormat string
+}
+
+// NewSigningTime creates a new SigningTime given a time.Time
+func NewSigningTime(t time.Time) SigningTime {
+ return SigningTime{
+ Time: t,
+ }
+}
+
+// TimeFormat provides a time formatted in the X-Amz-Date format.
+func (m *SigningTime) TimeFormat() string {
+ return m.format(&m.timeFormat, TimeFormat)
+}
+
+// ShortTimeFormat provides a time formatted of 20060102.
+func (m *SigningTime) ShortTimeFormat() string {
+ return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+ if len(*target) > 0 {
+ return *target
+ }
+ v := m.Time.Format(format)
+ *target = v
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
new file mode 100644
index 000000000..d025dbaa0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
@@ -0,0 +1,80 @@
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+const doubleSpace = " "
+
+// StripExcessSpaces trims leading and trailing spaces from the passed-in
+// string and collapses multiple side-by-side spaces into one.
+func StripExcessSpaces(str string) string {
+ var j, k, l, m, spaces int
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ return str
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ return string(buf[:m])
+}
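+
+// For example, StripExcessSpaces("  a   b  ") returns "a b".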
+
+// GetURIPath returns the escaped URI component from the provided URL.
+func GetURIPath(u *url.URL) string {
+ var uriPath string
+
+ if len(u.Opaque) > 0 {
+ const schemeSep, pathSep, queryStart = "//", "/", "?"
+
+ opaque := u.Opaque
+ // Cut off the query string if present.
+ if idx := strings.Index(opaque, queryStart); idx >= 0 {
+ opaque = opaque[:idx]
+ }
+
+		// Cut out the scheme separator if present.
+ if strings.Index(opaque, schemeSep) == 0 {
+ opaque = opaque[len(schemeSep):]
+ }
+
+ // capture URI path starting with first path separator.
+ if idx := strings.Index(opaque, pathSep); idx >= 0 {
+ uriPath = opaque[idx:]
+ }
+ } else {
+ uriPath = u.EscapedPath()
+ }
+
+ if len(uriPath) == 0 {
+ uriPath = "/"
+ }
+
+ return uriPath
+}
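+
+// For example, for a URL parsed from "https://example.com/a%2Fb?x=1" this
+// returns the escaped path "/a%2Fb"; for a URL with an empty path it returns
+// "/".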
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
new file mode 100644
index 000000000..8a46220a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
@@ -0,0 +1,420 @@
+package v4
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const computePayloadHashMiddlewareID = "ComputePayloadHash"
+
+// HashComputationError indicates an error occurred while computing the signing hash
+type HashComputationError struct {
+ Err error
+}
+
+// Error is the error message
+func (e *HashComputationError) Error() string {
+ return fmt.Sprintf("failed to compute payload hash: %v", e.Err)
+}
+
+// Unwrap returns the underlying error if one is set
+func (e *HashComputationError) Unwrap() error {
+ return e.Err
+}
+
+// SigningError indicates an error condition occurred while performing SigV4 signing
+type SigningError struct {
+ Err error
+}
+
+func (e *SigningError) Error() string {
+ return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+ return e.Err
+}
+
+// UseDynamicPayloadSigningMiddleware swaps the compute payload SHA256 middleware with a resolver middleware that
+// switches between unsigned and signed payload based on the TLS state of the request.
+// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth.
+// By default, the SDK uses this middleware for known AWS APIs that support such TLS-based auth selection.
+//
+// Usage example -
+// The S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to
+// dynamically switch between unsigned and signed payload based on the TLS state of the request.
+func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{})
+ return err
+}
+
+// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and sets the payload SHA256.
+type dynamicPayloadSigningMiddleware struct {
+}
+
+// ID returns the resolver identifier
+func (m *dynamicPayloadSigningMiddleware) ID() string {
+ return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize delegates SHA256 computation according to whether the request
+// is TLS-enabled.
+func (m *dynamicPayloadSigningMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if req.IsHTTPS() {
+ return (&UnsignedPayload{}).HandleFinalize(ctx, in, next)
+ }
+ return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next)
+}
+
+// UnsignedPayload sets the SigV4 request payload hash to unsigned.
+//
+// Will not set the Unsigned Payload magic SHA value if a SHA has already been
+// stored in the context (e.g. the application pre-computed the SHA256 before
+// making the API call).
+//
+// This middleware does not check the X-Amz-Content-Sha256 header; if that
+// header is serialized, a middleware must translate it into the context.
+type UnsignedPayload struct{}
+
+// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation
+// middleware stack
+func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+// ID returns the unsignedPayload identifier
+func (m *UnsignedPayload) ID() string {
+ return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize sets the payload hash magic value to the unsigned sentinel.
+func (m *UnsignedPayload) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if GetPayloadHash(ctx) == "" {
+ ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload)
+ }
+ return next.HandleFinalize(ctx, in)
+}
+
+// ComputePayloadSHA256 computes SHA256 payload hash to sign.
+//
+// Will not compute the payload hash if a SHA has already been stored in the
+// context (e.g. the application pre-computed the SHA256 before making the API
+// call).
+//
+// This middleware does not check the X-Amz-Content-Sha256 header; if that
+// header is serialized, a middleware must translate it into the context.
+type ComputePayloadSHA256 struct{}
+
+// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the
+// operation middleware stack
+func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the
+// operation middleware stack
+func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error {
+ _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID)
+ return err
+}
+
+// ID is the middleware name
+func (m *ComputePayloadSHA256) ID() string {
+ return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize computes the payload hash for the request, storing it to the
+// context. This is a no-op if a caller has previously set that value.
+func (m *ComputePayloadSHA256) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if GetPayloadHash(ctx) != "" {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ _, span := tracing.StartSpan(ctx, "ComputePayloadSHA256")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &HashComputationError{
+ Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+ }
+ }
+
+ hash := sha256.New()
+ if stream := req.GetStream(); stream != nil {
+ _, err = io.Copy(hash, stream)
+ if err != nil {
+ return out, metadata, &HashComputationError{
+ Err: fmt.Errorf("failed to compute payload hash, %w", err),
+ }
+ }
+
+ if err := req.RewindStream(); err != nil {
+ return out, metadata, &HashComputationError{
+ Err: fmt.Errorf("failed to seek body to start, %w", err),
+ }
+ }
+ }
+
+ ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil)))
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the
+// ComputePayloadSHA256 middleware with the UnsignedPayload middleware.
+//
+// Use this to disable computing the Payload SHA256 checksum and instead use
+// UNSIGNED-PAYLOAD for the SHA256 value.
+func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{})
+ return err
+}
+
+// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to
+// the Payload hash stored in the context.
+type ContentSHA256Header struct{}
+
+// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the
+// operation middleware stack
+func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
+}
+
+// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware
+// from the operation middleware stack
+func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID())
+ return err
+}
+
+// ID returns the ContentSHA256HeaderMiddleware identifier
+func (m *ContentSHA256Header) ID() string {
+ return "SigV4ContentSHA256Header"
+}
+
+// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash
+// stored in the context.
+func (m *ContentSHA256Header) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
+ }
+
+ req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx))
+ return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPRequestMiddlewareOptions is the configuration options for
+// [SignHTTPRequestMiddleware].
+//
+// Deprecated: [SignHTTPRequestMiddleware] is deprecated.
+type SignHTTPRequestMiddlewareOptions struct {
+ CredentialsProvider aws.CredentialsProvider
+ Signer HTTPSigner
+ LogSigning bool
+}
+
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4
+// HTTP Signing.
+//
+// Deprecated: AWS service clients no longer use this middleware. Signing as an
+// SDK operation is now performed through an internal per-service middleware
+// which opaquely selects and uses the signer from the resolved auth scheme.
+type SignHTTPRequestMiddleware struct {
+ credentialsProvider aws.CredentialsProvider
+ signer HTTPSigner
+ logSigning bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using
+// the given [Signer] for signing requests.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+ return &SignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ signer: options.Signer,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID is the SignHTTPRequestMiddleware identifier.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
+func (s *SignHTTPRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+// HandleFinalize will take the provided input and sign the request using the
+// SigV4 authentication scheme.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if !haveCredentialProvider(s.credentialsProvider) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)}
+ }
+
+ signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+ payloadHash := GetPayloadHash(ctx)
+ if len(payloadHash) == 0 {
+ return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+ }
+
+ credentials, err := s.credentialsProvider.Retrieve(ctx)
+ if err != nil {
+ return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+ }
+
+ signerOptions := []func(o *SignerOptions){
+ func(o *SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ },
+ }
+
+ // existing DisableURIPathEscaping is equivalent in purpose
+ // to authentication scheme property DisableDoubleEncoding
+ disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx)
+ if overridden {
+ signerOptions = append(signerOptions, func(o *SignerOptions) {
+ o.DisableURIPathEscaping = disableDoubleEncoding
+ })
+ }
+
+ err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...)
+ if err != nil {
+ return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+ }
+
+ ctx = awsmiddleware.SetSigningCredentials(ctx, credentials)
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// StreamingEventsPayload signs input event stream messages.
+type StreamingEventsPayload struct{}
+
+// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack.
+func AddStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before)
+}
+
+// ID identifies the middleware.
+func (s *StreamingEventsPayload) ID() string {
+ return computePayloadHashMiddlewareID
+}
+
+// HandleFinalize marks the input stream to be signed with SigV4.
+func (s *StreamingEventsPayload) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ contentSHA := GetPayloadHash(ctx)
+ if len(contentSHA) == 0 {
+ contentSHA = v4Internal.StreamingEventsPayload
+ }
+
+ ctx = SetPayloadHash(ctx, contentSHA)
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// GetSignedRequestSignature attempts to extract the signature of the request,
+// returning an error if the request is unsigned or the signature cannot be
+// extracted.
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
+ const authHeaderSignatureElem = "Signature="
+
+ if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
+ ps := strings.Split(auth, ",")
+ for _, p := range ps {
+ p = strings.TrimSpace(p)
+ if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
+ sig := p[len(authHeaderSignatureElem):]
+ if len(sig) == 0 {
+ return nil, fmt.Errorf("invalid request signature authorization header")
+ }
+ return hex.DecodeString(sig)
+ }
+ }
+ }
+
+ if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
+ return hex.DecodeString(sig)
+ }
+
+ return nil, fmt.Errorf("request not signed")
+}
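+
+// For example, given a signed request whose Authorization header contains the
+// element "Signature=deadbeef", GetSignedRequestSignature returns the
+// hex-decoded bytes of "deadbeef".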
+
+func haveCredentialProvider(p aws.CredentialsProvider) bool {
+ if p == nil {
+ return false
+ }
+
+ return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil))
+}
+
+type payloadHashKey struct{}
+
+// GetPayloadHash retrieves the payload hash to use for signing
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetPayloadHash(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string)
+ return v
+}
+
+// SetPayloadHash sets the payload hash to be used for signing the request
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPayloadHash(ctx context.Context, hash string) context.Context {
+ return middleware.WithStackValue(ctx, payloadHashKey{}, hash)
+}
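+
+// A sketch of seeding a pre-computed payload hash so ComputePayloadSHA256
+// skips hashing the body; "sum" is assumed to already hold the SHA256 of the
+// payload:
+//
+//	ctx = SetPayloadHash(ctx, hex.EncodeToString(sum[:]))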
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
new file mode 100644
index 000000000..e1a066512
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
@@ -0,0 +1,127 @@
+package v4
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/middleware"
+ smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4 signer that can create a presigned
+// URL for an HTTP request.
+type HTTPPresigner interface {
+ PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignedHTTPRequest provides the URL and signed headers that are included
+// in the presigned URL.
+type PresignedHTTPRequest struct {
+ URL string
+ Method string
+ SignedHeader http.Header
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+ CredentialsProvider aws.CredentialsProvider
+ Presigner HTTPPresigner
+ LogSigning bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+ credentialsProvider aws.CredentialsProvider
+ presigner HTTPPresigner
+ logSigning bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+ return &PresignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ presigner: options.Presigner,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned URL for
+// the HTTP request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request, the middleware short
+// circuits the stack and returns the presigned request as the result, rather
+// than forwarding it to the next Finalize handler.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyHTTP.Request)
+ if !ok {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+ }
+ }
+
+ httpReq := req.Build(ctx)
+ if !haveCredentialProvider(s.credentialsProvider) {
+ out.Result = &PresignedHTTPRequest{
+ URL: httpReq.URL.String(),
+ Method: httpReq.Method,
+ SignedHeader: http.Header{},
+ }
+
+ return out, metadata, nil
+ }
+
+ signingName := awsmiddleware.GetSigningName(ctx)
+ signingRegion := awsmiddleware.GetSigningRegion(ctx)
+ payloadHash := GetPayloadHash(ctx)
+ if len(payloadHash) == 0 {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("computed payload hash missing from context"),
+ }
+ }
+
+ credentials, err := s.credentialsProvider.Retrieve(ctx)
+ if err != nil {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+ }
+ }
+
+ u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+ httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(),
+ func(o *SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ })
+ if err != nil {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("failed to sign http request, %w", err),
+ }
+ }
+
+ out.Result = &PresignedHTTPRequest{
+ URL: u,
+ Method: httpReq.Method,
+ SignedHeader: h,
+ }
+
+ return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
new file mode 100644
index 000000000..32875e077
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
@@ -0,0 +1,86 @@
+package v4
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ "strings"
+ "time"
+)
+
+// EventStreamSigner is an AWS EventStream protocol signer.
+type EventStreamSigner interface {
+ GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
+}
+
+// StreamSignerOptions is the configuration options for StreamSigner.
+type StreamSignerOptions struct{}
+
+// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
+type StreamSigner struct {
+ options StreamSignerOptions
+
+ credentials aws.Credentials
+ service string
+ region string
+
+ prevSignature []byte
+
+ signingKeyDeriver *v4Internal.SigningKeyDeriver
+}
+
+// NewStreamSigner returns a new AWS EventStream protocol signer.
+func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
+ o := StreamSignerOptions{}
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &StreamSigner{
+ options: o,
+ credentials: credentials,
+ service: service,
+ region: region,
+ signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
+ prevSignature: seedSignature,
+ }
+}
+
+// GetSignature signs the provided header and payload bytes.
+func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ prevSignature := s.prevSignature
+
+ st := v4Internal.NewSigningTime(signingTime.UTC())
+
+ sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
+
+ scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
+
+ stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
+
+ signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
+ s.prevSignature = signature
+
+ return signature, nil
+}
+
+func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
+ hash := sha256.New()
+ return strings.Join([]string{
+ "AWS4-HMAC-SHA256-PAYLOAD",
+ signingTime.TimeFormat(),
+ credentialScope,
+ hex.EncodeToString(previousSignature),
+ hex.EncodeToString(makeHash(hash, headers)),
+ hex.EncodeToString(makeHash(hash, payload)),
+ }, "\n")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
new file mode 100644
index 000000000..7ed91d5ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -0,0 +1,564 @@
+// Package v4 implements the AWS signature version 4 algorithm (commonly known
+// as SigV4).
+//
+// For more information about SigV4, see [Signing AWS API requests] in the IAM
+// user guide.
+//
+// While this implementation CAN work in an external context, it is developed
+// primarily for SDK use and you may encounter fringe behaviors around header
+// canonicalization.
+//
+// # Pre-escaping a request URI
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// component must be the escaped form of the HTTP request's path.
+//
+// The Go HTTP client will perform escaping automatically on the HTTP request.
+// This may cause signature validation errors because the request differs from
+// the URI path or query from which the signature was generated.
+//
+// Because of this, we recommend that you explicitly escape the request when
+// using this signer outside of the SDK to prevent possible signature mismatch.
+// This can be done by setting URL.Opaque on the request. The signer will
+// prefer that value, falling back to the return of URL.EscapedPath if unset.
+//
+// When setting URL.Opaque you must do so in the form of:
+//
+// "///"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the escaping will not work
+// correctly.
+//
+// The TestStandaloneSign unit test provides a complete example of using the
+// signer outside of the SDK and pre-escaping the URI path.
+//
+// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
+package v4
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/logging"
+)
+
+const (
+ signingAlgorithm = "AWS4-HMAC-SHA256"
+ authorizationHeader = "Authorization"
+
+ // Version of signing v4
+ Version = "SigV4"
+)
+
+// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
+type HTTPSigner interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
+}
+
+type keyDerivator interface {
+ DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
+}
+
+// SignerOptions is the SigV4 Signer options.
+type SignerOptions struct {
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need additional
+ // escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // The logger to send log messages to.
+ Logger logging.Logger
+
+ // Enable logging of signed requests.
+ // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
+ // presigned URL.
+ LogSigning bool
+
+ // Disables setting the session token on the request as part of signing
+ // through X-Amz-Security-Token. This is needed for variations of v4 that
+ // present the token elsewhere.
+ DisableSessionToken bool
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ options SignerOptions
+ keyDerivator keyDerivator
+}
+
+// NewSigner returns a new SigV4 Signer
+func NewSigner(optFns ...func(signer *SignerOptions)) *Signer {
+ options := SignerOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()}
+}
+
+type httpSigner struct {
+ Request *http.Request
+ ServiceName string
+ Region string
+ Time v4Internal.SigningTime
+ Credentials aws.Credentials
+ KeyDerivator keyDerivator
+ IsPreSign bool
+
+ PayloadHash string
+
+ DisableHeaderHoisting bool
+ DisableURIPathEscaping bool
+ DisableSessionToken bool
+}
+
+func (s *httpSigner) Build() (signedRequest, error) {
+ req := s.Request
+
+ query := req.URL.Query()
+ headers := req.Header
+
+ s.setRequiredSigningFields(headers, query)
+
+ // Sort Each Query Key's Values
+ for key := range query {
+ sort.Strings(query[key])
+ }
+
+ v4Internal.SanitizeHostForHeader(req)
+
+ credentialScope := s.buildCredentialScope()
+ credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope
+ if s.IsPreSign {
+ query.Set(v4Internal.AmzCredentialKey, credentialStr)
+ }
+
+ unsignedHeaders := headers
+ if s.IsPreSign && !s.DisableHeaderHoisting {
+ var urlValues url.Values
+ urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers)
+ for k := range urlValues {
+ query[k] = urlValues[k]
+ }
+ }
+
+ host := req.URL.Host
+ if len(req.Host) > 0 {
+ host = req.Host
+ }
+
+ signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
+
+ if s.IsPreSign {
+ query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr)
+ }
+
+ var rawQuery strings.Builder
+ rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1))
+
+ canonicalURI := v4Internal.GetURIPath(req.URL)
+ if !s.DisableURIPathEscaping {
+ canonicalURI = httpbinding.EscapePath(canonicalURI, false)
+ }
+
+ canonicalString := s.buildCanonicalString(
+ req.Method,
+ canonicalURI,
+ rawQuery.String(),
+ signedHeadersStr,
+ canonicalHeaderStr,
+ )
+
+ strToSign := s.buildStringToSign(credentialScope, canonicalString)
+ signingSignature, err := s.buildSignature(strToSign)
+ if err != nil {
+ return signedRequest{}, err
+ }
+
+ if s.IsPreSign {
+ rawQuery.WriteString("&X-Amz-Signature=")
+ rawQuery.WriteString(signingSignature)
+ } else {
+ headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
+ }
+
+ req.URL.RawQuery = rawQuery.String()
+
+ return signedRequest{
+ Request: req,
+ SignedHeaders: signedHeaders,
+ CanonicalString: canonicalString,
+ StringToSign: strToSign,
+ PreSigned: s.IsPreSign,
+ }, nil
+}
+
+func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
+ const credential = "Credential="
+ const signedHeaders = "SignedHeaders="
+ const signature = "Signature="
+ const commaSpace = ", "
+
+ var parts strings.Builder
+ parts.Grow(len(signingAlgorithm) + 1 +
+ len(credential) + len(credentialStr) + 2 +
+ len(signedHeaders) + len(signedHeadersStr) + 2 +
+ len(signature) + len(signingSignature),
+ )
+ parts.WriteString(signingAlgorithm)
+ parts.WriteRune(' ')
+ parts.WriteString(credential)
+ parts.WriteString(credentialStr)
+ parts.WriteString(commaSpace)
+ parts.WriteString(signedHeaders)
+ parts.WriteString(signedHeadersStr)
+ parts.WriteString(commaSpace)
+ parts.WriteString(signature)
+ parts.WriteString(signingSignature)
+ return parts.String()
+}
+
+// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the
+// request is made to, and time the request is signed at. The signingTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided even if the request has no payload (aka body). If the
+// request has no payload, you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The passed in request will be modified in place.
+func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r,
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ Region: region,
+ Credentials: credentials,
+ Time: v4Internal.NewSigningTime(signingTime.UTC()),
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ DisableSessionToken: options.DisableSessionToken,
+ KeyDerivator: s.keyDerivator,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return err
+ }
+
+ logSigningInfo(ctx, options, &signedRequest, false)
+
+ return nil
+}
+
+// PresignHTTP signs AWS v4 requests with the payload hash, service name, region
+// the request is made to, and time the request is signed at. The signingTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns the signed URL and the map of HTTP headers that were included in the
+// signature or an error if signing the request failed. For presigned requests
+// these headers and their values must be included on the HTTP request when it
+// is made. This is helpful to know what header values need to be shared with
+// the party the presigned request will be distributed to.
+//
+// The payloadHash is the hex encoded SHA-256 hash of the request payload, and
+// must be provided even if the request has no payload (aka body). If the
+// request has no payload, you should use the hex encoded SHA-256 of an empty
+// string as the payloadHash value.
+//
+// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+//
+// Some services such as Amazon S3 accept alternative values for the payload
+// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
+// included in the request signature.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
+//
+// PresignHTTP differs from SignHTTP in that it will sign the request using
+// query string instead of header values. This allows you to share the
+// Presigned Request's URL with third parties, or distribute it throughout your
+// system with minimal dependencies.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expiration duration for a request, add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
+//
+// expires := 20 * time.Minute
+// query := req.URL.Query()
+// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
+// req.URL.RawQuery = query.Encode()
+//
+// This method does not modify the provided request.
+func (s *Signer) PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*SignerOptions),
+) (signedURI string, signedHeaders http.Header, err error) {
+ options := s.options
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r.Clone(r.Context()),
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ Region: region,
+ Credentials: credentials,
+ Time: v4Internal.NewSigningTime(signingTime.UTC()),
+ IsPreSign: true,
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ DisableSessionToken: options.DisableSessionToken,
+ KeyDerivator: s.keyDerivator,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return "", nil, err
+ }
+
+ logSigningInfo(ctx, options, &signedRequest, true)
+
+ signedHeaders = make(http.Header)
+
+ // For the signed headers we canonicalize the header keys in the returned map.
+ // This avoids situations where the standard library can double headers like the
+ // Host header; for example, it will set the Host header even if it is present in lower-case form.
+ for k, v := range signedRequest.SignedHeaders {
+ key := textproto.CanonicalMIMEHeaderKey(k)
+ signedHeaders[key] = append(signedHeaders[key], v...)
+ }
+
+ return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+ return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName)
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+
+ // A list of headers to be converted to lower case to mitigate a limitation from S3
+ lowerCaseHeaders := map[string]string{
+ "X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508
+ "X-Amz-Request-Payer": "x-amz-request-payer", // see #2764
+ }
+
+ for k, h := range header {
+ if newKey, ok := lowerCaseHeaders[k]; ok {
+ k = newKey
+ }
+
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+ signed = make(http.Header)
+
+ var headers []string
+ const hostHeader = "host"
+ headers = append(headers, hostHeader)
+ signed[hostHeader] = append(signed[hostHeader], host)
+
+ const contentLengthHeader = "content-length"
+ if length > 0 {
+ headers = append(headers, contentLengthHeader)
+ signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+ }
+
+ for k, v := range header {
+ if !rule.IsValid(k) {
+ continue // ignored header
+ }
+ if strings.EqualFold(k, contentLengthHeader) {
+ // prevent signing already handled content-length header.
+ continue
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := signed[lowerCaseKey]; ok {
+ // include additional values
+ signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ signed[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ signedHeaders = strings.Join(headers, ";")
+
+ var canonicalHeaders strings.Builder
+ n := len(headers)
+ const colon = ':'
+ for i := 0; i < n; i++ {
+ if headers[i] == hostHeader {
+ canonicalHeaders.WriteString(hostHeader)
+ canonicalHeaders.WriteRune(colon)
+ canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
+ } else {
+ canonicalHeaders.WriteString(headers[i])
+ canonicalHeaders.WriteRune(colon)
+ // Trim out leading, trailing, and dedup inner spaces from signed header values.
+ values := signed[headers[i]]
+ for j, v := range values {
+ cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
+ canonicalHeaders.WriteString(cleanedValue)
+ if j < len(values)-1 {
+ canonicalHeaders.WriteRune(',')
+ }
+ }
+ }
+ canonicalHeaders.WriteRune('\n')
+ }
+ canonicalHeadersStr = canonicalHeaders.String()
+
+ return signed, signedHeaders, canonicalHeadersStr
+}
+
+func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
+ return strings.Join([]string{
+ method,
+ uri,
+ query,
+ canonicalHeaders,
+ signedHeaders,
+ s.PayloadHash,
+ }, "\n")
+}
+
+func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
+ return strings.Join([]string{
+ signingAlgorithm,
+ s.Time.TimeFormat(),
+ credentialScope,
+ hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
+ }, "\n")
+}
+
+func makeHash(hash hash.Hash, b []byte) []byte {
+ hash.Reset()
+ hash.Write(b)
+ return hash.Sum(nil)
+}
+
+func (s *httpSigner) buildSignature(strToSign string) (string, error) {
+ key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time)
+ return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil
+}
+
+func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
+ amzDate := s.Time.TimeFormat()
+
+ if s.IsPreSign {
+ query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
+ sessionToken := s.Credentials.SessionToken
+ if !s.DisableSessionToken && len(sessionToken) > 0 {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ query.Set(v4Internal.AmzDateKey, amzDate)
+ return
+ }
+
+ headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
+
+ if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
+ headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
+ }
+}
+
+func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) {
+ if !options.LogSigning {
+ return
+ }
+ signedURLMsg := ""
+ if isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String())
+ }
+ logger := logging.WithContext(ctx, options.Logger)
+ logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg)
+}
+
+type signedRequest struct {
+ Request *http.Request
+ SignedHeaders http.Header
+ CanonicalString string
+ StringToSign string
+ PreSigned bool
+}
+
+const logSignInfoMsg = `Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
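
For reviewers unfamiliar with this signer: outside of generated clients it can be driven directly. A sketch signing a body-less request with the empty-payload hash the doc comment above calls for (endpoint, service, and credentials are illustrative placeholders):

```go
package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	// Hex-encoded SHA-256 of an empty body, required when there is no payload.
	sum := sha256.Sum256(nil)
	payloadHash := hex.EncodeToString(sum[:])

	req, err := http.NewRequest(http.MethodGet, "https://sts.us-east-1.amazonaws.com/", nil)
	if err != nil {
		panic(err)
	}

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"} // placeholders
	signer := v4.NewSigner()

	// The request is modified in place: Authorization and X-Amz-Date get set.
	if err := signer.SignHTTP(context.Background(), creds, req, payloadHash, "sts", "us-east-1", time.Now()); err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization"))
}
```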
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go
new file mode 100644
index 000000000..f3fc4d610
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go
@@ -0,0 +1,297 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+
+package aws
+
+import (
+ "github.com/aws/smithy-go/ptr"
+ "time"
+)
+
+// Bool returns a pointer value for the bool value passed in.
+func Bool(v bool) *bool {
+ return ptr.Bool(v)
+}
+
+// BoolSlice returns a slice of bool pointers from the values
+// passed in.
+func BoolSlice(vs []bool) []*bool {
+ return ptr.BoolSlice(vs)
+}
+
+// BoolMap returns a map of bool pointers from the values
+// passed in.
+func BoolMap(vs map[string]bool) map[string]*bool {
+ return ptr.BoolMap(vs)
+}
+
+// Byte returns a pointer value for the byte value passed in.
+func Byte(v byte) *byte {
+ return ptr.Byte(v)
+}
+
+// ByteSlice returns a slice of byte pointers from the values
+// passed in.
+func ByteSlice(vs []byte) []*byte {
+ return ptr.ByteSlice(vs)
+}
+
+// ByteMap returns a map of byte pointers from the values
+// passed in.
+func ByteMap(vs map[string]byte) map[string]*byte {
+ return ptr.ByteMap(vs)
+}
+
+// String returns a pointer value for the string value passed in.
+func String(v string) *string {
+ return ptr.String(v)
+}
+
+// StringSlice returns a slice of string pointers from the values
+// passed in.
+func StringSlice(vs []string) []*string {
+ return ptr.StringSlice(vs)
+}
+
+// StringMap returns a map of string pointers from the values
+// passed in.
+func StringMap(vs map[string]string) map[string]*string {
+ return ptr.StringMap(vs)
+}
+
+// Int returns a pointer value for the int value passed in.
+func Int(v int) *int {
+ return ptr.Int(v)
+}
+
+// IntSlice returns a slice of int pointers from the values
+// passed in.
+func IntSlice(vs []int) []*int {
+ return ptr.IntSlice(vs)
+}
+
+// IntMap returns a map of int pointers from the values
+// passed in.
+func IntMap(vs map[string]int) map[string]*int {
+ return ptr.IntMap(vs)
+}
+
+// Int8 returns a pointer value for the int8 value passed in.
+func Int8(v int8) *int8 {
+ return ptr.Int8(v)
+}
+
+// Int8Slice returns a slice of int8 pointers from the values
+// passed in.
+func Int8Slice(vs []int8) []*int8 {
+ return ptr.Int8Slice(vs)
+}
+
+// Int8Map returns a map of int8 pointers from the values
+// passed in.
+func Int8Map(vs map[string]int8) map[string]*int8 {
+ return ptr.Int8Map(vs)
+}
+
+// Int16 returns a pointer value for the int16 value passed in.
+func Int16(v int16) *int16 {
+ return ptr.Int16(v)
+}
+
+// Int16Slice returns a slice of int16 pointers from the values
+// passed in.
+func Int16Slice(vs []int16) []*int16 {
+ return ptr.Int16Slice(vs)
+}
+
+// Int16Map returns a map of int16 pointers from the values
+// passed in.
+func Int16Map(vs map[string]int16) map[string]*int16 {
+ return ptr.Int16Map(vs)
+}
+
+// Int32 returns a pointer value for the int32 value passed in.
+func Int32(v int32) *int32 {
+ return ptr.Int32(v)
+}
+
+// Int32Slice returns a slice of int32 pointers from the values
+// passed in.
+func Int32Slice(vs []int32) []*int32 {
+ return ptr.Int32Slice(vs)
+}
+
+// Int32Map returns a map of int32 pointers from the values
+// passed in.
+func Int32Map(vs map[string]int32) map[string]*int32 {
+ return ptr.Int32Map(vs)
+}
+
+// Int64 returns a pointer value for the int64 value passed in.
+func Int64(v int64) *int64 {
+ return ptr.Int64(v)
+}
+
+// Int64Slice returns a slice of int64 pointers from the values
+// passed in.
+func Int64Slice(vs []int64) []*int64 {
+ return ptr.Int64Slice(vs)
+}
+
+// Int64Map returns a map of int64 pointers from the values
+// passed in.
+func Int64Map(vs map[string]int64) map[string]*int64 {
+ return ptr.Int64Map(vs)
+}
+
+// Uint returns a pointer value for the uint value passed in.
+func Uint(v uint) *uint {
+ return ptr.Uint(v)
+}
+
+// UintSlice returns a slice of uint pointers from the values
+// passed in.
+func UintSlice(vs []uint) []*uint {
+ return ptr.UintSlice(vs)
+}
+
+// UintMap returns a map of uint pointers from the values
+// passed in.
+func UintMap(vs map[string]uint) map[string]*uint {
+ return ptr.UintMap(vs)
+}
+
+// Uint8 returns a pointer value for the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+ return ptr.Uint8(v)
+}
+
+// Uint8Slice returns a slice of uint8 pointers from the values
+// passed in.
+func Uint8Slice(vs []uint8) []*uint8 {
+ return ptr.Uint8Slice(vs)
+}
+
+// Uint8Map returns a map of uint8 pointers from the values
+// passed in.
+func Uint8Map(vs map[string]uint8) map[string]*uint8 {
+ return ptr.Uint8Map(vs)
+}
+
+// Uint16 returns a pointer value for the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return ptr.Uint16(v)
+}
+
+// Uint16Slice returns a slice of uint16 pointers from the values
+// passed in.
+func Uint16Slice(vs []uint16) []*uint16 {
+ return ptr.Uint16Slice(vs)
+}
+
+// Uint16Map returns a map of uint16 pointers from the values
+// passed in.
+func Uint16Map(vs map[string]uint16) map[string]*uint16 {
+ return ptr.Uint16Map(vs)
+}
+
+// Uint32 returns a pointer value for the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return ptr.Uint32(v)
+}
+
+// Uint32Slice returns a slice of uint32 pointers from the values
+// passed in.
+func Uint32Slice(vs []uint32) []*uint32 {
+ return ptr.Uint32Slice(vs)
+}
+
+// Uint32Map returns a map of uint32 pointers from the values
+// passed in.
+func Uint32Map(vs map[string]uint32) map[string]*uint32 {
+ return ptr.Uint32Map(vs)
+}
+
+// Uint64 returns a pointer value for the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return ptr.Uint64(v)
+}
+
+// Uint64Slice returns a slice of uint64 pointers from the values
+// passed in.
+func Uint64Slice(vs []uint64) []*uint64 {
+ return ptr.Uint64Slice(vs)
+}
+
+// Uint64Map returns a map of uint64 pointers from the values
+// passed in.
+func Uint64Map(vs map[string]uint64) map[string]*uint64 {
+ return ptr.Uint64Map(vs)
+}
+
+// Float32 returns a pointer value for the float32 value passed in.
+func Float32(v float32) *float32 {
+ return ptr.Float32(v)
+}
+
+// Float32Slice returns a slice of float32 pointers from the values
+// passed in.
+func Float32Slice(vs []float32) []*float32 {
+ return ptr.Float32Slice(vs)
+}
+
+// Float32Map returns a map of float32 pointers from the values
+// passed in.
+func Float32Map(vs map[string]float32) map[string]*float32 {
+ return ptr.Float32Map(vs)
+}
+
+// Float64 returns a pointer value for the float64 value passed in.
+func Float64(v float64) *float64 {
+ return ptr.Float64(v)
+}
+
+// Float64Slice returns a slice of float64 pointers from the values
+// passed in.
+func Float64Slice(vs []float64) []*float64 {
+ return ptr.Float64Slice(vs)
+}
+
+// Float64Map returns a map of float64 pointers from the values
+// passed in.
+func Float64Map(vs map[string]float64) map[string]*float64 {
+ return ptr.Float64Map(vs)
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return ptr.Time(v)
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+ return ptr.TimeSlice(vs)
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+ return ptr.TimeMap(vs)
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+ return ptr.Duration(v)
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+ return ptr.DurationSlice(vs)
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+ return ptr.DurationMap(vs)
+}
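
These generated helpers exist because optional fields on SDK input structs are pointers; they remove the need for temporary variables. A small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Optional API fields are pointers; the helpers inline the conversion.
	name := aws.String("my-bucket")
	max := aws.Int32(100)
	tags := aws.StringMap(map[string]string{"env": "dev"})

	fmt.Println(*name, *max, *tags["env"])
}
```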
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
new file mode 100644
index 000000000..8d7c35a9e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
@@ -0,0 +1,342 @@
+package http
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+ "reflect"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go/tracing"
+)
+
+// Defaults for the HTTPTransportBuilder.
+var (
+ // Default connection pool options
+ DefaultHTTPTransportMaxIdleConns = 100
+ DefaultHTTPTransportMaxIdleConnsPerHost = 10
+
+ // Default connection timeouts
+ DefaultHTTPTransportIdleConnTimeout = 90 * time.Second
+ DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second
+ DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second
+
+ // Default to TLS 1.2 for all HTTPS requests.
+ DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12
+)
+
+// Timeouts for net.Dialer's network connection.
+var (
+ DefaultDialConnectTimeout = 30 * time.Second
+ DefaultDialKeepAliveTimeout = 30 * time.Second
+)
+
+// BuildableClient provides an HTTPClient implementation with options to
+// create copies of the HTTPClient when additional configuration is provided.
+//
+// The client's methods will not share the http.Transport value between copies
+// of the BuildableClient. Only exported member values of the Transport and
+// optional Dialer will be copied between copies of BuildableClient.
+type BuildableClient struct {
+ transport *http.Transport
+ dialer *net.Dialer
+
+ initOnce sync.Once
+
+ clientTimeout time.Duration
+ client *http.Client
+}
+
+// NewBuildableClient returns an initialized client for invoking HTTP
+// requests.
+func NewBuildableClient() *BuildableClient {
+ return &BuildableClient{}
+}
+
+// Do implements the HTTPClient interface's Do method to invoke an HTTP request
+// and receive the response. It uses the BuildableClient's current
+// configuration to invoke the http.Request.
+//
+// If connection pooling is enabled (aka HTTP KeepAlive) the client will only
+// share pooled connections with its own instance. Copies of the
+// BuildableClient will have their own connection pools.
+//
+// Redirect (3xx) responses will not be followed; the HTTP response received
+// will be returned instead.
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
+ b.initOnce.Do(b.build)
+
+ return b.client.Do(req)
+}
+
+// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
+// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client.
+func (b *BuildableClient) Freeze() aws.HTTPClient {
+ cpy := b.clone()
+ cpy.build()
+ return cpy.client
+}
+
+func (b *BuildableClient) build() {
+ b.client = wrapWithLimitedRedirect(&http.Client{
+ Timeout: b.clientTimeout,
+ Transport: b.GetTransport(),
+ })
+}
+
+func (b *BuildableClient) clone() *BuildableClient {
+ cpy := NewBuildableClient()
+ cpy.transport = b.GetTransport()
+ cpy.dialer = b.GetDialer()
+ cpy.clientTimeout = b.clientTimeout
+
+ return cpy
+}
+
+// WithTransportOptions copies the BuildableClient and returns it with the
+// http.Transport options applied.
+//
+// If the current round tripper is not an *http.Transport, it will be replaced
+// with a default Transport value before the option
+// functions are invoked.
+func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient {
+ cpy := b.clone()
+
+ tr := cpy.GetTransport()
+ for _, opt := range opts {
+ opt(tr)
+ }
+ cpy.transport = tr
+
+ return cpy
+}
+
+// WithDialerOptions copies the BuildableClient and returns it with the
+// net.Dialer options applied. Will set the client's http.Transport DialContext
+// member.
+func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient {
+ cpy := b.clone()
+
+ dialer := cpy.GetDialer()
+ for _, opt := range opts {
+ opt(dialer)
+ }
+ cpy.dialer = dialer
+
+ tr := cpy.GetTransport()
+ tr.DialContext = cpy.dialer.DialContext
+ cpy.transport = tr
+
+ return cpy
+}
+
+// WithTimeout sets the timeout used by the client for all requests.
+func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient {
+ cpy := b.clone()
+ cpy.clientTimeout = timeout
+ return cpy
+}
+
+// GetTransport returns a copy of the client's HTTP Transport.
+func (b *BuildableClient) GetTransport() *http.Transport {
+ var tr *http.Transport
+ if b.transport != nil {
+ tr = b.transport.Clone()
+ } else {
+ tr = defaultHTTPTransport()
+ }
+
+ return tr
+}
+
+// GetDialer returns a copy of the client's network dialer.
+func (b *BuildableClient) GetDialer() *net.Dialer {
+ var dialer *net.Dialer
+ if b.dialer != nil {
+ dialer = shallowCopyStruct(b.dialer).(*net.Dialer)
+ } else {
+ dialer = defaultDialer()
+ }
+
+ return dialer
+}
+
+// GetTimeout returns a copy of the client's timeout to cancel requests with.
+func (b *BuildableClient) GetTimeout() time.Duration {
+ return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+ return &net.Dialer{
+ Timeout: DefaultDialConnectTimeout,
+ KeepAlive: DefaultDialKeepAliveTimeout,
+ DualStack: true,
+ }
+}
+
+func defaultHTTPTransport() *http.Transport {
+ dialer := defaultDialer()
+
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: traceDialContext(dialer.DialContext),
+ TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout,
+ MaxIdleConns: DefaultHTTPTransportMaxIdleConns,
+ MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost,
+ IdleConnTimeout: DefaultHTTPTransportIdleConnTimeout,
+ ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+ ForceAttemptHTTP2: true,
+ TLSClientConfig: &tls.Config{
+ MinVersion: DefaultHTTPTransportTLSMinVersion,
+ },
+ }
+
+ return tr
+}
+
+type dialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+func traceDialContext(dc dialContext) dialContext {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ span, _ := tracing.GetSpan(ctx)
+ span.SetProperty("net.peer.name", addr)
+
+ conn, err := dc(ctx, network, addr)
+ if err != nil {
+ return conn, err
+ }
+
+ raddr := conn.RemoteAddr()
+ if raddr == nil {
+ return conn, err
+ }
+
+ host, port, err := net.SplitHostPort(raddr.String())
+ if err != nil { // don't blow up just because we couldn't parse
+ span.SetProperty("net.peer.addr", raddr.String())
+ } else {
+ span.SetProperty("net.peer.host", host)
+ span.SetProperty("net.peer.port", port)
+ }
+
+ return conn, err
+ }
+}
+
+// shallowCopyStruct creates a shallow copy of the passed in source struct, and
+// returns that copy of the same struct type.
+func shallowCopyStruct(src interface{}) interface{} {
+ srcVal := reflect.ValueOf(src)
+ srcValType := srcVal.Type()
+
+ var returnAsPtr bool
+ if srcValType.Kind() == reflect.Ptr {
+ srcVal = srcVal.Elem()
+ srcValType = srcValType.Elem()
+ returnAsPtr = true
+ }
+ dstVal := reflect.New(srcValType).Elem()
+
+ for i := 0; i < srcValType.NumField(); i++ {
+ ft := srcValType.Field(i)
+ if len(ft.PkgPath) != 0 {
+ // unexported fields have a PkgPath
+ continue
+ }
+
+ dstVal.Field(i).Set(srcVal.Field(i))
+ }
+
+ if returnAsPtr {
+ dstVal = dstVal.Addr()
+ }
+
+ return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// follow only 307 and 308 redirects; no other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined, a new SDK default
+// http.Transport configuration will be used.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+ tr := c.Transport
+ if tr == nil {
+ tr = defaultHTTPTransport()
+ }
+
+ cc := *c
+ cc.CheckRedirect = limitedRedirect
+ cc.Transport = suppressBadHTTPRedirectTransport{
+ tr: tr,
+ }
+
+ return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non 307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected to location. Whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+ // Request.Response, in CheckRedirect is the response that is triggering
+ // the redirect.
+ resp := r.Response
+ if r.URL.String() == badHTTPRedirectLocation {
+ resp.Header.Del(badHTTPRedirectLocation)
+ return http.ErrUseLastResponse
+ }
+
+ switch resp.StatusCode {
+ case 307, 308:
+ // Only allow 307 and 308 redirects as they preserve the method.
+ return nil
+ }
+
+ return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent the HTTP
+// client from failing on 301 and 302 redirect responses that are missing the
+// required location header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that checks for responses having a URL of badHTTPRedirectLocation, and
+// suppresses the redirect.
+type suppressBadHTTPRedirectTransport struct {
+ tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing when attempting to follow a redirect without a
+// location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ resp, err := t.tr.RoundTrip(r)
+ if err != nil {
+ return resp, err
+ }
+
+ // S3 is the only known service to return 301 without location header.
+ // The Go standard library HTTP client will return an opaque error if it
+ // tries to follow a 301/302 response missing the location header.
+ switch resp.StatusCode {
+ case 301, 302:
+ if v := resp.Header.Get("Location"); len(v) == 0 {
+ resp.Header.Set("Location", badHTTPRedirectLocation)
+ }
+ }
+
+ return resp, err
+}
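
Worth noting for reviewers: every With* method clones the BuildableClient, so a base client can be configured repeatedly without sharing transports or connection pools. A sketch (the timeout and transport values are arbitrary):

```go
package main

import (
	"crypto/tls"
	"net/http"
	"time"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
)

func main() {
	// Each With* call returns a fresh copy; the zero-value base stays reusable.
	client := awshttp.NewBuildableClient().
		WithTimeout(15 * time.Second).
		WithTransportOptions(func(tr *http.Transport) {
			tr.MaxIdleConnsPerHost = 20
			tr.TLSClientConfig = &tls.Config{MinVersion: tls.VersionTLS13}
		})

	_ = client // pass as the HTTPClient when constructing aws.Config or a service client
}
```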
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
new file mode 100644
index 000000000..556f54a7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
@@ -0,0 +1,42 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes the
+// content-type header if the content-length header is unset or
+// is set to zero.
+type removeContentTypeHeader struct {
+}
+
+// ID the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+ return "RemoveContentTypeHeader"
+}
+
+// HandleBuild removes the content-type header from the request when the content length is zero.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in)
+ }
+
+ // remove contentTypeHeader when content-length is zero
+ if req.ContentLength == 0 {
+ req.Header.Del("content-type")
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes content-type header if
+// content length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+ return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
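
RemoveContentTypeHeader is exported so callers can opt in per stack. One plausible way to wire it into every client built from a shared config, using the config module that is also part of this dependency set (illustrative, not the only option):

```go
package main

import (
	"context"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	// Register the middleware on every client constructed from this config.
	cfg, err := config.LoadDefaultConfig(context.Background(),
		config.WithAPIOptions([]func(*middleware.Stack) error{
			awshttp.RemoveContentTypeHeader,
		}),
	)
	if err != nil {
		panic(err)
	}
	_ = cfg
}
```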
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
new file mode 100644
index 000000000..44651c990
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
@@ -0,0 +1,33 @@
+package http
+
+import (
+ "errors"
+ "fmt"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+ *smithyhttp.ResponseError
+
+ // RequestID associated with response error
+ RequestID string
+}
+
+// ServiceRequestID returns the request id associated with Response Error
+func (e *ResponseError) ServiceRequestID() string { return e.RequestID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "https response error StatusCode: %d, RequestID: %s, %v",
+ e.Response.StatusCode, e.RequestID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type that
+// the ResponseError embeds (e.g. the AWS HTTP ResponseError).
+func (e *ResponseError) As(target interface{}) bool {
+ return errors.As(e.ResponseError, target)
+}
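
Callers typically reach this type through errors.As rather than constructing it. A sketch of pulling the status code and request ID out of an operation error (the inspect helper is ours, not part of the SDK):

```go
package main

import (
	"errors"
	"fmt"

	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
)

// inspect reports the HTTP status and request ID if the error chain contains
// the transport ResponseError defined above.
func inspect(err error) {
	var respErr *awshttp.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println("status:", respErr.HTTPStatusCode(), "request id:", respErr.ServiceRequestID())
	}
}

func main() {
	// With a plain error the errors.As check simply fails and nothing prints.
	inspect(fmt.Errorf("some operation error"))
}
```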
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
new file mode 100644
index 000000000..a1ad20fe3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
@@ -0,0 +1,56 @@
+package http
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+ // add error wrapper middleware before request id retriever middleware so that it can wrap the error response
+ // returned by operation deserializers
+ return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+// ResponseErrorWrapper wraps operation errors with ResponseError.
+type ResponseErrorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *ResponseErrorWrapper) ID() string {
+ return "ResponseErrorWrapper"
+}
+
+// HandleDeserialize wraps the stack error with smithyhttp.ResponseError.
+func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err == nil {
+ // Nothing to do when there is no error.
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // look for request id in metadata
+ reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+
+ // Wrap the returned smithy error with the request id retrieved from the metadata
+ err = &ResponseError{
+ ResponseError: &smithyhttp.ResponseError{
+ Response: resp,
+ Err: err,
+ },
+ RequestID: reqID,
+ }
+
+ return out, metadata, err
+}
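
ResponseErrorWrapper also shows the general shape of a deserialize middleware. A hedged sketch of a custom middleware in the same mold that reads the request ID from operation metadata (logRequestID is our name, not part of the SDK); it would be registered with `stack.Deserialize.Add(logRequestID{}, middleware.After)`:

```go
package sdkmw

import (
	"context"
	"fmt"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
)

// logRequestID mirrors the shape of ResponseErrorWrapper: a deserialize
// middleware that runs after the inner handlers and inspects the request ID
// recorded in the operation metadata.
type logRequestID struct{}

func (logRequestID) ID() string { return "LogRequestID" }

func (logRequestID) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	middleware.DeserializeOutput, middleware.Metadata, error,
) {
	out, metadata, err := next.HandleDeserialize(ctx, in)
	if reqID, ok := awsmiddleware.GetRequestIDMetadata(metadata); ok {
		fmt.Println("request id:", reqID)
	}
	return out, metadata, err
}
```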
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
new file mode 100644
index 000000000..4881ae144
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
@@ -0,0 +1,109 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// ResponseTimeoutError is an error when the reads from the response are
+// delayed longer than the timeout the read was configured for.
+type ResponseTimeoutError struct {
+ TimeoutDur time.Duration
+}
+
+// Timeout returns true, indicating that the error was caused by a timeout and
+// can be retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+ return fmt.Sprintf("read on body reach timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// It returns a ResponseTimeoutError if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+//
+// Deprecated: This API was previously exposed to customize behavior of the
+// Kinesis service. That customization has been removed and this middleware's
+// implementation can cause panics within the standard library networking loop.
+// See #2752.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+ return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+ duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+ return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ response.Body = &timeoutReadCloser{
+ reader: response.Body,
+ duration: m.duration,
+ }
+ out.RawResponse = response
+
+ return out, metadata, err
+}
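
The middleware itself is deprecated (see the note above), but the timer-plus-goroutine read pattern is reusable on its own. A standalone sketch of the same idea; this is not the vendored type, which is unexported:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"time"
)

// readWithTimeout mirrors timeoutReadCloser: the blocking Read runs in a
// goroutine while a timer bounds how long we wait for it.
func readWithTimeout(r io.Reader, b []byte, d time.Duration) (int, error) {
	type result struct {
		n   int
		err error
	}
	timer := time.NewTimer(d)
	c := make(chan result, 1) // buffered so the goroutine never blocks forever

	go func() {
		n, err := r.Read(b)
		timer.Stop()
		c <- result{n, err}
	}()

	select {
	case res := <-c:
		return res.n, res.err
	case <-timer.C:
		return 0, fmt.Errorf("read timed out after %v", d)
	}
}

func main() {
	buf := make([]byte, 8)
	n, err := readWithTimeout(strings.NewReader("hello"), buf, time.Second)
	fmt.Println(n, err, string(buf[:n]))
}
```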
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
new file mode 100644
index 000000000..cc3ae8114
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
@@ -0,0 +1,42 @@
+package aws
+
+import (
+ "fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+ switch t {
+ case UnknownTernary:
+ return "unknown"
+ case FalseTernary:
+ return "false"
+ case TrueTernary:
+ return "true"
+ default:
+ return fmt.Sprintf("unknown value, %d", int(t))
+ }
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+ return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+ UnknownTernary Ternary = iota
+ FalseTernary
+ TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided.
+func BoolTernary(v bool) Ternary {
+ if v {
+ return TrueTernary
+ }
+ return FalseTernary
+}
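
The point of Ternary is that its zero value means "not configured", which a plain bool cannot express. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// The zero value is UnknownTernary, so "not configured" is
	// distinguishable from an explicit false.
	var dualStack aws.Ternary
	fmt.Println(dualStack)             // unknown
	fmt.Println(aws.BoolTernary(true)) // true
	fmt.Println(dualStack.Bool())      // false: only TrueTernary maps to true
}
```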
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
new file mode 100644
index 000000000..5f729d45e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go-v2"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = goModuleVersion
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
new file mode 100644
index 000000000..b6057249b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -0,0 +1,967 @@
+# v1.31.17 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.31.16 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.15 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.14 (2025-10-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.13 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.12 (2025-09-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.11 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.10 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.9 (2025-09-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.8 (2025-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.7 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.5 (2025-08-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.4 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.3 (2025-08-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.2 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.1 (2025-08-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.0 (2025-08-11)
+
+* **Feature**: Add support for configuring per-service Options via callback on global config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.3 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.2 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2025-07-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.18 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.17 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.16 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.15 (2025-06-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.14 (2025-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.13 (2025-04-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.12 (2025-03-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.11 (2025-03-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.10 (2025-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.9 (2025-03-04.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.8 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.7 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.6 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.5 (2025-02-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.4 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.3 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.2 (2025-01-24)
+
+* **Bug Fix**: Fix env config naming and usage of deprecated ioutil
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.29.1 (2025-01-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2025-01-15)
+
+* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2025-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.10 (2025-01-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.9 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2025-01-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-11-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-10-16)
+
+* **Feature**: Adds the LoadOptions hook `WithBaseEndpoint` for setting global endpoint override in-code.
+
+# v1.27.43 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.42 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.41 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.40 (2024-10-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.39 (2024-09-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.38 (2024-09-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.37 (2024-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.36 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.35 (2024-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.34 (2024-09-16)
+
+* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it
+
+# v1.27.33 (2024-09-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.32 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.31 (2024-08-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.30 (2024-08-23)
+
+* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file
+
+# v1.27.29 (2024-08-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.28 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.27 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.26 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.25 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.24 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.23 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.22 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.21 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.20 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.19 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.18 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.17 (2024-06-03)
+
+* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.11 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-12-07)
+
+* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`.
+* **Dependency Update**: Updated to the latest SDK module versions
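A minimal sketch (editorial, assuming typical usage) of opting out of, or tuning, this behavior through the environment; both variable names match the constants defined in env_config.go later in this diff:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Disable modeled gzip compression entirely, or raise the minimum
	// request body size (in bytes) that triggers it.
	os.Setenv("AWS_DISABLE_REQUEST_COMPRESSION", "true")
	os.Setenv("AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES", "65536")
	if _, err := config.LoadDefaultConfig(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```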
+
+# v1.25.12 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.11 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.10 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.9 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.8 (2023-11-28.3)
+
+* **Bug Fix**: Correct resolution of S3Express auth disable toggle.
+
+# v1.25.7 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2023-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.2 (2023-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-14)
+
+* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-11-13)
+
+* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-11-09.2)
+
+* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.3 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.2 (2023-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2023-11-06)
+
+* No change notes available for this release.
+
+# v1.22.0 (2023-11-02)
+
+* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-10-24)
+
+* No change notes available for this release.
+
+# v1.19.0 (2023-10-16)
+
+* **Feature**: Modify logic of retrieving user agent appID from env config
+
+# v1.18.45 (2023-10-12)
+
+* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.44 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.43 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.42 (2023-09-22)
+
+* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
+* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.41 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.40 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.39 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.38 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.37 (2023-08-23)
+
+* No change notes available for this release.
+
+# v1.18.36 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.35 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.34 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.33 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.32 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.29 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.28 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.27 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.26 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.25 (2023-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.24 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.23 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.22 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.21 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.20 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.19 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.18 (2023-03-16)
+
+* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015.
+
+# v1.18.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2022-12-15)
+
+* **Bug Fix**: Unify the home directory lookup logic between the shared config and shared credentials files
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-08-14)
+
+* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
+
+# v1.16.1 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-10)
+
+* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+
+# v1.15.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-09)
+
+* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
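For context, a sketch of the kind of load option this fix makes effective (assuming the `WithEC2IMDSClientEnableState` helper, which wraps the `EC2IMDSClientEnableState` setting mentioned above):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// With ConfigSources assigned correctly, this option now reaches the
	// feature/ec2/imds resolver and actually disables the IMDS client.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithEC2IMDSClientEnableState(imds.ClientDisabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```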
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them.
+* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-01-28)
+
+* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
+* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug.
+* **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard-coded environment variables for the OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
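A hedged sketch of how the new member is typically exercised through its `WithCredentialsCacheOptions` helper (the five-minute window is an arbitrary example value):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Only applied when the loader wraps the resolved provider in
	// aws.CredentialsCache, per the note above.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
			o.ExpiryWindow = 5 * time.Minute // refresh credentials early
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```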
+
+# v1.11.1 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-02)
+
+* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
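The programmatic path looks roughly like this (a sketch; the equivalent environment variables, `AWS_USE_FIPS_ENDPOINT` and `AWS_USE_DUALSTACK_ENDPOINT`, appear in env_config.go later in this diff):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Ask all clients built from this config to resolve FIPS and
	// dual-stack endpoint variants.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```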
+
+# v1.9.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-09-02)
+
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-07-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-06-25)
+
+* **Feature**: Adds configuration setting for enabling endpoint discovery.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-05-20)
+
+* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile.
+* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/auth_scheme_preference.go b/vendor/github.com/aws/aws-sdk-go-v2/config/auth_scheme_preference.go
new file mode 100644
index 000000000..99e123661
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/auth_scheme_preference.go
@@ -0,0 +1,19 @@
+package config
+
+import "strings"
+
+func toAuthSchemePreferenceList(cfg string) []string {
+ if len(cfg) == 0 {
+ return nil
+ }
+ parts := strings.Split(cfg, ",")
+ ids := make([]string, 0, len(parts))
+
+ for _, p := range parts {
+ if id := strings.TrimSpace(p); len(id) > 0 {
+ ids = append(ids, id)
+ }
+ }
+
+ return ids
+}
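A quick behavioral sketch of the parser above, written as a test-style snippet in the same package since the function is unexported: entries are trimmed, empty items are dropped, and an empty input yields nil.

```go
package config

import (
	"reflect"
	"testing"
)

func TestToAuthSchemePreferenceList(t *testing.T) {
	// "sigv4a, sigv4,,  " -> ["sigv4a", "sigv4"]: surrounding whitespace is
	// trimmed and empty entries are skipped.
	got := toAuthSchemePreferenceList("sigv4a, sigv4,,  ")
	want := []string{"sigv4a", "sigv4"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
	if toAuthSchemePreferenceList("") != nil {
		t.Fatal("expected nil for empty input")
	}
}
```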
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
new file mode 100644
index 000000000..caa20a158
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
@@ -0,0 +1,235 @@
+package config
+
+import (
+ "context"
+ "os"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// defaultAWSConfigResolvers is a slice of functions that will resolve external
+// configuration values into AWS configuration values.
+//
+// This will set up the AWS configuration's Region, credentials, and related values.
+var defaultAWSConfigResolvers = []awsConfigResolver{
+ // Resolves the default configuration the SDK's aws.Config will be
+ // initialized with.
+ resolveDefaultAWSConfig,
+
+	// Sets the logger to be used. Could be a user-provided logger, and the
+	// client logging mode.
+ resolveLogger,
+ resolveClientLogMode,
+
+ // Sets the HTTP client and configuration to use for making requests using
+ // the HTTP transport.
+ resolveHTTPClient,
+ resolveCustomCABundle,
+
+ // Sets the endpoint resolving behavior the API Clients will use for making
+	// requests. Clients default to their own resolution behavior; this allows overrides
+ // to be specified. The resolveEndpointResolver option is deprecated, but
+ // we still need to set it for backwards compatibility on config
+ // construction.
+ resolveEndpointResolver,
+ resolveEndpointResolverWithOptions,
+
+ // Sets the retry behavior API clients will use within their retry attempt
+ // middleware. Defaults to unset, allowing API clients to define their own
+ // retry behavior.
+ resolveRetryer,
+
+ // Sets the region the API Clients should use for making requests to.
+ resolveRegion,
+ resolveEC2IMDSRegion,
+ resolveDefaultRegion,
+
+	// Sets the additional set of middleware stack mutators that will customize
+	// the API client request pipeline middleware.
+ resolveAPIOptions,
+
+ // Resolves the DefaultsMode that should be used by SDK clients. If this
+	// mode is set to DefaultsModeAuto, the runtime environment is discovered.
+ //
+ // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is
+ // configured if provided before invoking IMDS if mode is auto. Comes
+ // before resolving credentials so that those subsequent clients use the
+ // configured auto mode.
+ resolveDefaultsModeOptions,
+
+ // Sets the resolved credentials the API clients will use for
+ // authentication. Provides the SDK's default credential chain.
+ //
+ // Should probably be the last step in the resolve chain to ensure that all
+ // other configurations are resolved first in case downstream credentials
+ // implementations depend on or can be configured with earlier resolved
+ // configuration options.
+ resolveCredentials,
+
+ // Sets the resolved bearer authentication token API clients will use for
+ // httpBearerAuth authentication scheme.
+ resolveBearerAuthToken,
+
+ // Sets the sdk app ID if present in env var or shared config profile
+ resolveAppID,
+
+ resolveBaseEndpoint,
+
+ // Sets the DisableRequestCompression if present in env var or shared config profile
+ resolveDisableRequestCompression,
+
+ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile
+ resolveRequestMinCompressSizeBytes,
+
+ // Sets the AccountIDEndpointMode if present in env var or shared config profile
+ resolveAccountIDEndpointMode,
+
+ // Sets the RequestChecksumCalculation if present in env var or shared config profile
+ resolveRequestChecksumCalculation,
+
+ // Sets the ResponseChecksumValidation if present in env var or shared config profile
+ resolveResponseChecksumValidation,
+
+ resolveInterceptors,
+
+ resolveAuthSchemePreference,
+
+ // Sets the ServiceOptions if present in LoadOptions
+ resolveServiceOptions,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract configuration data.
+//
+// Generally the Config type will be used with type assertions against the
+// Provider interfaces to extract specific data from the Config.
+type Config interface{}
+
+// A loader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external configuration,
+// the configuration data is malformed, or required components are missing.
+type loader func(context.Context, configs) (Config, error)
+
+// An awsConfigResolver will extract configuration data from the configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it fails to extract the data, the
+// data is malformed, or incomplete.
+type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
+
+// configs is a slice of Config values. These values will be used by the
+// AWSConfigResolvers to extract external configuration values to populate the
+// AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
+ for _, fn := range loaders {
+ cfg, err := fn(ctx, cs)
+ if err != nil {
+ return nil, err
+ }
+
+ cs = append(cs, cfg)
+ }
+
+ return cs, nil
+}
+
+// ResolveAWSConfig returns an AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS Configuration value of a previous resolver.
+//
+// If a resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
+func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
+ var cfg aws.Config
+
+ for _, fn := range resolvers {
+ if err := fn(ctx, &cfg, cs); err != nil {
+ return aws.Config{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+// ResolveConfig calls the provided function, passing the slice of configuration sources.
+// This implements the aws.ConfigResolver interface.
+func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
+ var cfgs []interface{}
+ for i := range cs {
+ cfgs = append(cfgs, cs[i])
+ }
+ return f(cfgs)
+}
+
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+// cfg, err := config.LoadDefaultConfig( context.TODO(),
+// config.WithSharedConfigProfile("test-profile"),
+// )
+// if err != nil {
+// panic(fmt.Sprintf("failed loading config, %v", err))
+// }
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
+ var options LoadOptions
+ for _, optFn := range optFns {
+ if err := optFn(&options); err != nil {
+ return aws.Config{}, err
+ }
+ }
+
+ // assign Load Options to configs
+ var cfgCpy = configs{options}
+
+ cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options))
+ if err != nil {
+ return aws.Config{}, err
+ }
+
+ cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+ if err != nil {
+ return aws.Config{}, err
+ }
+
+ return cfg, nil
+}
+
+func resolveConfigLoaders(options *LoadOptions) []loader {
+ loaders := make([]loader, 2)
+ loaders[0] = loadEnvConfig
+
+ // specification of a profile should cause a load failure if it doesn't exist
+ if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" {
+ loaders[1] = loadSharedConfig
+ } else {
+ loaders[1] = loadSharedConfigIgnoreNotExist
+ }
+
+ return loaders
+}
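To make the branch above concrete (a sketch; the exact error type returned is not shown in this hunk): explicitly naming a profile, via env var or load option, selects the strict loader, so a missing profile surfaces as an error rather than being silently ignored.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Because a profile is named explicitly, loadSharedConfig (the strict
	// variant) is chosen and a missing profile is reported as an error.
	_, err := config.LoadDefaultConfig(context.TODO(),
		config.WithSharedConfigProfile("does-not-exist"),
	)
	fmt.Println(err) // non-nil when the profile is absent
}
```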
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
new file mode 100644
index 000000000..20b66367f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
@@ -0,0 +1,47 @@
+package config
+
+import (
+ "context"
+ "os"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+const execEnvVar = "AWS_EXECUTION_ENV"
+
+// DefaultsModeOptions is the set of options that are used to configure the SDK's defaults mode.
+type DefaultsModeOptions struct {
+ // The SDK configuration defaults mode. Defaults to legacy if not specified.
+ //
+ // Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
+ Mode aws.DefaultsMode
+
+ // The EC2 Instance Metadata Client that should be used when performing environment
+ // discovery when aws.DefaultsModeAuto is set.
+ //
+ // If not specified the SDK will construct a client if the instance metadata service has not been disabled by
+ // the AWS_EC2_METADATA_DISABLED environment variable.
+ IMDSClient *imds.Client
+}
+
+func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) {
+ getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{})
+ // honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection.
+ select {
+ case <-ctx.Done():
+ return aws.RuntimeEnvironment{}, err
+ default:
+ }
+
+ var imdsRegion string
+ if err == nil {
+ imdsRegion = getRegionOutput.Region
+ }
+
+ return aws.RuntimeEnvironment{
+ EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)),
+ Region: envConfig.Region,
+ EC2InstanceMetadataRegion: imdsRegion,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
new file mode 100644
index 000000000..aab7164e2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
@@ -0,0 +1,20 @@
+// Package config provides utilities for loading configuration from multiple
+// sources that can be used to configure the SDK's API clients, and utilities.
+//
+// The config package will load configuration from environment variables, AWS
+// shared configuration file (~/.aws/config), and AWS shared credentials file
+// (~/.aws/credentials).
+//
+// Use the LoadDefaultConfig to load configuration from all the SDK's supported
+// sources, and resolve credentials using the SDK's default credential chain.
+//
+// LoadDefaultConfig allows for a variadic list of additional Config sources that can
+// provide one or more configuration values which can be used to programmatically control the resolution
+// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK.
+// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will
+// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
+// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
+//
+// A number of helpers (prefixed by "With") are provided in this package that implement their respective provider
+// interface. These helpers should be used for overriding configuration programmatically at runtime.
+package config
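A small sketch of the precedence rule described above: an explicitly passed `With` helper outranks the environment and shared-file sources.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// The explicit source wins, so cfg.Region is "us-west-2" even if
	// AWS_REGION or the shared config file says otherwise.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-west-2"),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("resolved region:", cfg.Region)
}
```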
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
new file mode 100644
index 000000000..e932c63df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
@@ -0,0 +1,932 @@
+package config
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
+)
+
+// CredentialsSourceName provides a name of the provider when config is
+// loaded from environment.
+const CredentialsSourceName = "EnvConfigCredentials"
+
+// Environment variables that will be read for configuration values.
+const (
+ awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID"
+ awsAccessKeyEnv = "AWS_ACCESS_KEY"
+
+ awsSecretAccessKeyEnv = "AWS_SECRET_ACCESS_KEY"
+ awsSecretKeyEnv = "AWS_SECRET_KEY"
+
+ awsSessionTokenEnv = "AWS_SESSION_TOKEN"
+
+ awsContainerCredentialsFullURIEnv = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+ awsContainerCredentialsRelativeURIEnv = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+ awsContainerAuthorizationTokenEnv = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+
+ awsRegionEnv = "AWS_REGION"
+ awsDefaultRegionEnv = "AWS_DEFAULT_REGION"
+
+ awsProfileEnv = "AWS_PROFILE"
+ awsDefaultProfileEnv = "AWS_DEFAULT_PROFILE"
+
+ awsSharedCredentialsFileEnv = "AWS_SHARED_CREDENTIALS_FILE"
+
+ awsConfigFileEnv = "AWS_CONFIG_FILE"
+
+ awsCABundleEnv = "AWS_CA_BUNDLE"
+
+ awsWebIdentityTokenFileEnv = "AWS_WEB_IDENTITY_TOKEN_FILE"
+
+ awsRoleARNEnv = "AWS_ROLE_ARN"
+ awsRoleSessionNameEnv = "AWS_ROLE_SESSION_NAME"
+
+ awsEnableEndpointDiscoveryEnv = "AWS_ENABLE_ENDPOINT_DISCOVERY"
+
+ awsS3UseARNRegionEnv = "AWS_S3_USE_ARN_REGION"
+
+ awsEc2MetadataServiceEndpointModeEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
+
+ awsEc2MetadataServiceEndpointEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+ awsEc2MetadataDisabledEnv = "AWS_EC2_METADATA_DISABLED"
+ awsEc2MetadataV1DisabledEnv = "AWS_EC2_METADATA_V1_DISABLED"
+
+ awsS3DisableMultiRegionAccessPointsEnv = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
+
+ awsUseDualStackEndpointEnv = "AWS_USE_DUALSTACK_ENDPOINT"
+
+ awsUseFIPSEndpointEnv = "AWS_USE_FIPS_ENDPOINT"
+
+ awsDefaultsModeEnv = "AWS_DEFAULTS_MODE"
+
+ awsMaxAttemptsEnv = "AWS_MAX_ATTEMPTS"
+ awsRetryModeEnv = "AWS_RETRY_MODE"
+ awsSdkUaAppIDEnv = "AWS_SDK_UA_APP_ID"
+
+ awsIgnoreConfiguredEndpointURLEnv = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
+ awsEndpointURLEnv = "AWS_ENDPOINT_URL"
+
+ awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION"
+ awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
+
+ awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
+
+ awsAccountIDEnv = "AWS_ACCOUNT_ID"
+ awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE"
+
+ awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION"
+ awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION"
+
+ awsAuthSchemePreferenceEnv = "AWS_AUTH_SCHEME_PREFERENCE"
+)
+
+var (
+ credAccessEnvKeys = []string{
+ awsAccessKeyIDEnv,
+ awsAccessKeyEnv,
+ }
+ credSecretEnvKeys = []string{
+ awsSecretAccessKeyEnv,
+ awsSecretKeyEnv,
+ }
+ regionEnvKeys = []string{
+ awsRegionEnv,
+ awsDefaultRegionEnv,
+ }
+ profileEnvKeys = []string{
+ awsProfileEnv,
+ awsDefaultProfileEnv,
+ }
+)
+
+// EnvConfig is a collection of environment values the SDK will read its
+// setup configuration from. All environment values are optional, but some
+// values such as credentials require multiple values to be complete or the
+// values will be ignored.
+type EnvConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. A Session Token can optionally also be provided, but is
+	// not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Credentials aws.Credentials
+
+ // ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+ // using the endpointcreds.Provider
+ ContainerCredentialsEndpoint string
+
+ // ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+ // credentials from the container endpoint.
+ ContainerCredentialsRelativePath string
+
+ // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+ // header when attempting to retrieve credentials from the container credentials endpoint.
+ ContainerAuthorizationToken string
+
+	// Region value will instruct the SDK where to make service API requests to. If it is
+ // not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-west-2
+ // AWS_DEFAULT_REGION=us-west-2
+ Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ // AWS_DEFAULT_PROFILE=my_profile
+ SharedConfigProfile string
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the config. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+ // Specifies the WebIdentity token the SDK should use to assume a role
+ // with.
+ //
+ // AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+ WebIdentityTokenFilePath string
+
+	// Specifies the IAM role ARN to use when assuming a role.
+ //
+ // AWS_ROLE_ARN=role_arn
+ RoleARN string
+
+ // Specifies the IAM role session name to use when assuming a role.
+ //
+ // AWS_ROLE_SESSION_NAME=session_name
+ RoleSessionName string
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // AWS_S3_USE_ARN_REGION=true
+ S3UseARNRegion *bool
+
+ // Specifies if the EC2 IMDS service client is enabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
+ EC2IMDSClientEnableState imds.ClientEnableState
+
+ // Specifies if EC2 IMDSv1 fallback is disabled.
+ //
+ // AWS_EC2_METADATA_V1_DISABLED=true
+ EC2IMDSv1Disabled *bool
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+ EC2IMDSEndpointMode imds.EndpointModeState
+
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254
+ EC2IMDSEndpoint string
+
+ // Specifies if the S3 service should disable multi-region access points
+ // support.
+ //
+ // AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true
+ S3DisableMultiRegionAccessPoints *bool
+
+ // Specifies that SDK clients must resolve a dual-stack endpoint for
+ // services.
+ //
+ // AWS_USE_DUALSTACK_ENDPOINT=true
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Specifies that SDK clients must resolve a FIPS endpoint for
+ // services.
+ //
+ // AWS_USE_FIPS_ENDPOINT=true
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // Specifies the SDK Defaults Mode used by services.
+ //
+ // AWS_DEFAULTS_MODE=standard
+ DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number of attempts an API client will make calling an
+ // operation that fails with a retryable error.
+ //
+ // AWS_MAX_ATTEMPTS=3
+ RetryMaxAttempts int
+
+	// Specifies the retry mode the API client will be created with.
+	//
+	// AWS_RETRY_MODE=standard
+ RetryMode aws.RetryMode
+
+ // aws sdk app ID that can be added to user agent header string
+ AppID string
+
+ // Flag used to disable configured endpoints.
+ IgnoreConfiguredEndpoints *bool
+
+ // Value to contain configured endpoints to be propagated to
+ // corresponding endpoint resolution field.
+ BaseEndpoint string
+
+ // determine if request compression is allowed, default to false
+ // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION
+ DisableRequestCompression *bool
+
+ // inclusive threshold request body size to trigger compression,
+ // default to 10240 and must be within 0 and 10485760 bytes inclusive
+ // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES
+ RequestMinCompressSizeBytes *int64
+
+ // Whether S3Express auth is disabled.
+ //
+ // This will NOT prevent requests from being made to S3Express buckets, it
+ // will only bypass the modified endpoint routing and signing behaviors
+ // associated with the feature.
+ S3DisableExpressAuth *bool
+
+ // Indicates whether account ID will be required/ignored in endpoint2.0 routing
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // Indicates whether request checksum should be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // Indicates whether response checksum should be validated
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+}
+
+// loadEnvConfig reads configuration values from the OS's environment variables.
+// Returns a Config typed EnvConfig to satisfy the ConfigLoader func type.
+func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
+ return NewEnvConfig()
+}
+
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
+func NewEnvConfig() (EnvConfig, error) {
+ var cfg EnvConfig
+
+ creds := aws.Credentials{
+ Source: CredentialsSourceName,
+ }
+ setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
+ setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
+ if creds.HasKeys() {
+ creds.AccountID = os.Getenv(awsAccountIDEnv)
+ creds.SessionToken = os.Getenv(awsSessionTokenEnv)
+ cfg.Credentials = creds
+ }
+
+ cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv)
+ cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv)
+ cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv)
+
+ setStringFromEnvVal(&cfg.Region, regionEnvKeys)
+ setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
+
+ cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv)
+ cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv)
+
+ cfg.CustomCABundle = os.Getenv(awsCABundleEnv)
+
+ cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFileEnv)
+
+ cfg.RoleARN = os.Getenv(awsRoleARNEnv)
+ cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnv)
+
+ cfg.AppID = os.Getenv(awsSdkUaAppIDEnv)
+
+ if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompressionEnv}); err != nil {
+ return cfg, err
+ }
+ if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytesEnv}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil {
+ return cfg, err
+ }
+
+ if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnv}); err != nil {
+ return cfg, err
+ }
+
+ setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabledEnv})
+ if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnv}); err != nil {
+ return cfg, err
+ }
+ cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnv)
+ if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointsEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpointEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpointEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultsModeEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsMaxAttemptsEnv}); err != nil {
+ return cfg, err
+ }
+ if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryModeEnv}); err != nil {
+ return cfg, err
+ }
+
+ setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv})
+
+ if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil {
+ return cfg, err
+ }
+ if err := setResponseChecksumValidationFromEnvVal(&cfg.ResponseChecksumValidation, []string{awsResponseChecksumValidation}); err != nil {
+ return cfg, err
+ }
+
+ cfg.AuthSchemePreference = toAuthSchemePreferenceList(os.Getenv(awsAuthSchemePreferenceEnv))
+
+ return cfg, nil
+}
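+
+// Illustrative sketch: with the standard env vars set, NewEnvConfig surfaces
+// them directly (values below are placeholders):
+//
+//	os.Setenv("AWS_REGION", "us-west-2")
+//	os.Setenv("AWS_MAX_ATTEMPTS", "5")
+//	envCfg, err := NewEnvConfig()
+//	if err == nil {
+//		fmt.Println(envCfg.Region, envCfg.RetryMaxAttempts) // us-west-2 5
+//	}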
+
+func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+ if len(c.DefaultsMode) == 0 {
+ return "", false, nil
+ }
+ return c.DefaultsMode, true, nil
+}
+
+func (c EnvConfig) getAppID(context.Context) (string, bool, error) {
+ return c.AppID, len(c.AppID) > 0, nil
+}
+
+func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) {
+ if c.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *c.DisableRequestCompression, true, nil
+}
+
+func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) {
+ if c.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) {
+ return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
+}
+
+func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) {
+ return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil
+}
+
+func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) {
+ return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil
+}
+
+// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if it was
+// specified and is not 0.
+func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+ if c.RetryMaxAttempts == 0 {
+ return 0, false, nil
+ }
+ return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode from AWS_RETRY_MODE if it was specified
+// and is a valid value.
+func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+ if len(c.RetryMode) == 0 {
+ return "", false, nil
+ }
+ return c.RetryMode, true, nil
+}
+
+func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+ switch {
+ case strings.EqualFold(value, "true"):
+ *state = imds.ClientDisabled
+ case strings.EqualFold(value, "false"):
+ *state = imds.ClientEnabled
+ default:
+ continue
+ }
+ break
+ }
+}
+
+func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error {
+ for _, k := range keys {
+ if value := os.Getenv(k); len(value) > 0 {
+ if ok := mode.SetFromString(value); !ok {
+ return fmt.Errorf("invalid %s value: %s", k, value)
+ }
+ break
+ }
+ }
+ return nil
+}
+
+func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) {
+ for _, k := range keys {
+ if value := os.Getenv(k); len(value) > 0 {
+ *mode, err = aws.ParseRetryMode(value)
+ if err != nil {
+ return fmt.Errorf("invalid %s value, %w", k, err)
+ }
+ break
+ }
+ }
+ return nil
+}
+
+func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+ if err := mode.SetFromString(value); err != nil {
+ return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+ }
+ }
+ return nil
+}
+
+func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch value {
+ case "preferred":
+ *m = aws.AccountIDEndpointModePreferred
+ case "required":
+ *m = aws.AccountIDEndpointModeRequired
+ case "disabled":
+ *m = aws.AccountIDEndpointModeDisabled
+ default:
+ return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value)
+ }
+ break
+ }
+ return nil
+}
+
+func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch strings.ToLower(value) {
+ case checksumWhenSupported:
+ *m = aws.RequestChecksumCalculationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.RequestChecksumCalculationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value)
+ }
+ }
+ return nil
+}
+
+func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch strings.ToLower(value) {
+ case checksumWhenSupported:
+ *m = aws.ResponseChecksumValidationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.ResponseChecksumValidationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value)
+ }
+
+ }
+ return nil
+}
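+
+// Illustrative sketch: both checksum settings accept the same two values,
+// matched case-insensitively; assuming the conventional env var names
+// AWS_REQUEST_CHECKSUM_CALCULATION and AWS_RESPONSE_CHECKSUM_VALIDATION:
+//
+//	AWS_REQUEST_CHECKSUM_CALCULATION=when_supported
+//	AWS_RESPONSE_CHECKSUM_VALIDATION=when_required
+//
+// map to aws.RequestChecksumCalculationWhenSupported and
+// aws.ResponseChecksumValidationWhenRequired respectively.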
+
+// getRegion returns the AWS Region if set in the environment. Returns an
+// empty string if not set.
+func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
+ if len(c.Region) == 0 {
+ return "", false, nil
+ }
+ return c.Region, true, nil
+}
+
+// getSharedConfigProfile returns the shared config profile if set in the
+// environment. Returns an empty string if not set.
+func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+ if len(c.SharedConfigProfile) == 0 {
+ return "", false, nil
+ }
+
+ return c.SharedConfigProfile, true, nil
+}
+
+// getSharedConfigFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Config
+func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) {
+ var files []string
+ if v := c.SharedConfigFile; len(v) > 0 {
+ files = append(files, v)
+ }
+
+ if len(files) == 0 {
+ return nil, false, nil
+ }
+ return files, true, nil
+}
+
+// getSharedCredentialsFiles returns a slice of filenames set in the environment.
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+ var files []string
+ if v := c.SharedCredentialsFile; len(v) > 0 {
+ files = append(files, v)
+ }
+ if len(files) == 0 {
+ return nil, false, nil
+ }
+ return files, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file path
+// was set in the environment.
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+ if len(c.CustomCABundle) == 0 {
+ return nil, false, nil
+ }
+
+ b, err := os.ReadFile(c.CustomCABundle)
+ if err != nil {
+ return nil, false, err
+ }
+ return bytes.NewReader(b), true, nil
+}
+
+// GetIgnoreConfiguredEndpoints is used to determine when to disable the
+// configured endpoints feature.
+func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+ if c.IgnoreConfiguredEndpoints == nil {
+ return false, false, nil
+ }
+
+ return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+ return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint retrieves the configured endpoint override for the
+// given SDK ID, normalized to its environment variable form.
+func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+ if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" {
+ return endpt, true, nil
+ }
+ return "", false, nil
+}
+
+func normalizeEnv(sdkID string) string {
+ upper := strings.ToUpper(sdkID)
+ return strings.ReplaceAll(upper, " ", "_")
+}
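+
+// Illustrative sketch: GetServiceBaseEndpoint composes the service-specific
+// env var from the SDK ID, so an SDK ID of "Elastic Beanstalk" (placeholder)
+// would be looked up as AWS_ENDPOINT_URL_ELASTIC_BEANSTALK:
+//
+//	endpt, ok, err := cfg.GetServiceBaseEndpoint(ctx, "Elastic Beanstalk")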
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+ if c.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+
+ return *c.S3UseARNRegion, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point
+// support for the S3 client.
+func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+ if c.S3DisableMultiRegionAccessPoints == nil {
+ return false, false, nil
+ }
+
+ return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+ if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+ return aws.DualStackEndpointStateUnset, false, nil
+ }
+
+ return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+ if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+ return aws.FIPSEndpointStateUnset, false, nil
+ }
+
+ return c.UseFIPSEndpoint, true, nil
+}
+
+func setStringFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
+
+func setIntFromEnvVal(dst *int, keys []string) error {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ i, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid value %s=%s, %w", k, v, err)
+ }
+ *dst = int(i)
+ break
+ }
+ }
+
+ return nil
+}
+
+func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ if *dst == nil {
+ *dst = new(bool)
+ }
+
+ switch {
+ case strings.EqualFold(value, "false"):
+ **dst = false
+ case strings.EqualFold(value, "true"):
+ **dst = true
+ default:
+ return fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true or false",
+ k, value)
+ }
+ break
+ }
+
+ return nil
+}
+
+func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value)
+ } else if v < 0 || v > max {
+ return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v)
+ }
+ if *dst == nil {
+ *dst = new(int64)
+ }
+
+ **dst = v
+ break
+ }
+
+ return nil
+}
+
+func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue // skip if empty
+ }
+
+ switch {
+ case strings.EqualFold(value, endpointDiscoveryDisabled):
+ *dst = aws.EndpointDiscoveryDisabled
+ case strings.EqualFold(value, endpointDiscoveryEnabled):
+ *dst = aws.EndpointDiscoveryEnabled
+ case strings.EqualFold(value, endpointDiscoveryAuto):
+ *dst = aws.EndpointDiscoveryAuto
+ default:
+ return fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true, false or auto",
+ k, value)
+ }
+ }
+ return nil
+}
+
+func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue // skip if empty
+ }
+
+ switch {
+ case strings.EqualFold(value, "true"):
+ *dst = aws.DualStackEndpointStateEnabled
+ case strings.EqualFold(value, "false"):
+ *dst = aws.DualStackEndpointStateDisabled
+ default:
+ return fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true, false",
+ k, value)
+ }
+ }
+ return nil
+}
+
+func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue // skip if empty
+ }
+
+ switch {
+ case strings.EqualFold(value, "true"):
+ *dst = aws.FIPSEndpointStateEnabled
+ case strings.EqualFold(value, "false"):
+ *dst = aws.FIPSEndpointStateDisabled
+ default:
+ return fmt.Errorf(
+ "invalid value for environment variable, %s=%s, need true, false",
+ k, value)
+ }
+ }
+ return nil
+}
+
+// GetEnableEndpointDiscovery returns the resolved value for the
+// EnableEndpointDiscovery env variable setting.
+func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+ if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+ return aws.EndpointDiscoveryUnset, false, nil
+ }
+
+ return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetEC2IMDSClientEnableState implements an EC2IMDSClientEnableState option resolver interface.
+func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+ if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+ return imds.ClientDefaultEnableState, false, nil
+ }
+
+ return c.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+ if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+ return imds.EndpointModeStateUnset, false, nil
+ }
+
+ return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+ if len(c.EC2IMDSEndpoint) == 0 {
+ return "", false, nil
+ }
+
+ return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+ if c.EC2IMDSv1Disabled == nil {
+ return false, false
+ }
+
+ return *c.EC2IMDSv1Disabled, true
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [EnvConfig.S3DisableExpressAuth].
+func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) {
+ if c.S3DisableExpressAuth == nil {
+ return false, false
+ }
+
+ return *c.S3DisableExpressAuth, true
+}
+
+func (c EnvConfig) getAuthSchemePreference() ([]string, bool) {
+ if len(c.AuthSchemePreference) > 0 {
+ return c.AuthSchemePreference, true
+ }
+ return nil, false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
new file mode 100644
index 000000000..654a7a77f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
@@ -0,0 +1,4 @@
+package config
+
+//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go
+//go:generate gofmt -s -w ./
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
new file mode 100644
index 000000000..e27cb6f4d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package config
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.31.17"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
new file mode 100644
index 000000000..7cb5a1365
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
@@ -0,0 +1,1355 @@
+package config
+
+import (
+ "context"
+ "io"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// LoadOptionsFunc is a function type for LoadOptions functional options.
+type LoadOptionsFunc func(*LoadOptions) error
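+
+// Illustrative sketch: LoadOptionsFunc values are what this file's With*
+// helpers return; they are typically passed to LoadDefaultConfig (region and
+// attempt count below are placeholders):
+//
+//	cfg, err := LoadDefaultConfig(context.TODO(),
+//		WithRegion("us-west-2"),
+//		WithRetryMaxAttempts(5),
+//	)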
+
+// LoadOptions is a discrete set of options that are valid for loading the
+// configuration.
+type LoadOptions struct {
+
+ // Region is the region to send requests to.
+ Region string
+
+ // Credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // Token provider for authentication operations with bearer authentication.
+ BearerAuthTokenProvider smithybearer.TokenProvider
+
+ // HTTPClient the SDK's API clients will use to invoke HTTP requests.
+ HTTPClient HTTPClient
+
+ // EndpointResolver that can be used to provide or override an endpoint for
+ // the given service and region.
+ //
+ // See the `aws.EndpointResolver` documentation on usage.
+ //
+ // Deprecated: See EndpointResolverWithOptions
+ EndpointResolver aws.EndpointResolver
+
+ // EndpointResolverWithOptions that can be used to provide or override an
+ // endpoint for the given service and region.
+ //
+ // See the `aws.EndpointResolverWithOptions` documentation on usage.
+ EndpointResolverWithOptions aws.EndpointResolverWithOptions
+
+ // RetryMaxAttempts specifies the maximum number of attempts an API client
+ // will make when calling an operation that fails with a retryable error.
+ //
+ // This value will only be used if Retryer option is nil.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry model the API client will be created with.
+ //
+ // This value will only be used if Retryer option is nil.
+ RetryMode aws.RetryMode
+
+ // Retryer is a function that provides a Retryer implementation. A Retryer
+ // guides how HTTP requests should be retried in case of recoverable
+ // failures.
+ //
+ // If not nil, RetryMaxAttempts, and RetryMode will be ignored.
+ Retryer func() aws.Retryer
+
+ // APIOptions provides the set of middleware mutations that modify how the
+ // API client's requests will be handled. This is useful for adding
+ // additional tracing data to a request, or changing behavior of the SDK's
+ // client.
+ APIOptions []func(*middleware.Stack) error
+
+ // Logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // ClientLogMode is used to configure the events that will be sent to the
+ // configured logger. This can be used to configure the logging of signing,
+ // retries, request, and responses of the SDK clients.
+ //
+ // See the ClientLogMode type documentation for the complete set of logging
+ // modes and available configuration.
+ ClientLogMode *aws.ClientLogMode
+
+ // SharedConfigProfile is the profile to be used when loading the SharedConfig
+ SharedConfigProfile string
+
+ // SharedConfigFiles is the slice of custom shared config files to use when
+ // loading the SharedConfig. A non-default profile used within a config file
+ // must have its name defined with the prefix 'profile '. e.g. [profile xyz]
+ // indicates a profile with name 'xyz'. To read more on the format of the
+ // config file, please refer to the documentation at
+ // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+ //
+ // If duplicate profiles are provided within the same, or across multiple
+ // shared config files, the next parsed profile will override only the
+ // properties that conflict with the previously defined profile. Note that
+ // if duplicate profiles are provided within the SharedCredentialsFiles and
+ // SharedConfigFiles, the properties defined in shared credentials file
+ // take precedence.
+ SharedConfigFiles []string
+
+ // SharedCredentialsFiles is the slice of custom shared credentials files to
+ // use when loading the SharedConfig. The profile name used within a
+ // credentials file must not be prefixed with 'profile '. e.g. [xyz]
+ // indicates a profile with name 'xyz'. A profile declared as [profile xyz]
+ // will be ignored. To read more on the format of the credentials file,
+ // please refer to the documentation at
+ // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+ //
+ // If duplicate profiles are provided within the same, or across multiple
+ // shared credentials files, the next parsed profile will override only
+ // properties that conflict with the previously defined profile. Note that
+ // if duplicate profiles are provided within the SharedCredentialsFiles and
+ // SharedConfigFiles, the properties defined in shared credentials file
+ // take precedence.
+ SharedCredentialsFiles []string
+
+ // CustomCABundle is a reader of the CA bundle PEM bytes
+ CustomCABundle io.Reader
+
+ // DefaultRegion is the fallback region, used if a region was not resolved
+ // from other sources
+ DefaultRegion string
+
+ // UseEC2IMDSRegion indicates if SDK should retrieve the region
+ // from the EC2 Metadata service
+ UseEC2IMDSRegion *UseEC2IMDSRegion
+
+ // CredentialsCacheOptions is a function for setting the
+ // aws.CredentialsCacheOptions
+ CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
+
+ // BearerAuthTokenCacheOptions is a function for setting the smithy-go
+ // auth/bearer#TokenCacheOptions
+ BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions)
+
+ // SSOTokenProviderOptions is a function for setting the
+ // credentials/ssocreds.SSOTokenProviderOptions
+ SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions)
+
+ // ProcessCredentialOptions is a function for setting
+ // the processcreds.Options
+ ProcessCredentialOptions func(*processcreds.Options)
+
+ // EC2RoleCredentialOptions is a function for setting
+ // the ec2rolecreds.Options
+ EC2RoleCredentialOptions func(*ec2rolecreds.Options)
+
+ // EndpointCredentialOptions is a function for setting
+ // the endpointcreds.Options
+ EndpointCredentialOptions func(*endpointcreds.Options)
+
+ // WebIdentityRoleCredentialOptions is a function for setting
+ // the stscreds.WebIdentityRoleOptions
+ WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions)
+
+ // AssumeRoleCredentialOptions is a function for setting the
+ // stscreds.AssumeRoleOptions
+ AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions)
+
+ // SSOProviderOptions is a function for setting
+ // the ssocreds.Options
+ SSOProviderOptions func(options *ssocreds.Options)
+
+ // LogConfigurationWarnings when set to true, enables logging
+ // configuration warnings
+ LogConfigurationWarnings *bool
+
+ // S3UseARNRegion specifies if the S3 service should allow ARNs to direct
+ // the region, the client's requests are sent to.
+ S3UseARNRegion *bool
+
+ // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable
+ // the S3 Multi-Region access points feature.
+ S3DisableMultiRegionAccessPoints *bool
+
+ // EnableEndpointDiscovery specifies if endpoint discovery is enabled for
+ // the client.
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+ // Specifies if the EC2 IMDS service client is enabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
+ EC2IMDSClientEnableState imds.ClientEnableState
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection
+ // mode (IPv4 or IPv6)
+ EC2IMDSEndpointMode imds.EndpointModeState
+
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If
+ // specified it overrides EC2IMDSEndpointMode.
+ EC2IMDSEndpoint string
+
+ // Specifies that SDK clients must resolve a dual-stack endpoint for
+ // services.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Specifies that SDK clients must resolve a FIPS endpoint for
+ // services.
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // Specifies the SDK configuration mode for defaults.
+ DefaultsModeOptions DefaultsModeOptions
+
+ // The SDK app ID, retrieved from an env var or shared config, to be added to the request User-Agent header
+ AppID string
+
+ // Specifies whether an operation request could be compressed
+ DisableRequestCompression *bool
+
+ // The inclusive min bytes of a request body that could be compressed
+ RequestMinCompressSizeBytes *int64
+
+ // Whether S3 Express auth is disabled.
+ S3DisableExpressAuth *bool
+
+ // Whether account id should be built into endpoint resolution
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // Specify if request checksum should be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // Specifies if response checksum should be validated
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+
+ // Service endpoint override. This value is not necessarily final and is
+ // passed to the service's EndpointResolverV2 for further delegation.
+ BaseEndpoint string
+
+ // Registry of operation interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+
+ // ServiceOptions provides service specific configuration options that will be applied
+ // when constructing clients for specific services. Each callback function receives the service ID
+ // and the service's Options struct, allowing for dynamic configuration based on the service.
+ ServiceOptions []func(string, any)
+}
+
+func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+ if len(o.DefaultsModeOptions.Mode) == 0 {
+ return "", false, nil
+ }
+ return o.DefaultsModeOptions.Mode, true, nil
+}
+
+// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the
+// LoadOptions and not 0.
+func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+ if o.RetryMaxAttempts == 0 {
+ return 0, false, nil
+ }
+ return o.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode specified in the LoadOptions.
+func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+ if len(o.RetryMode) == 0 {
+ return "", false, nil
+ }
+ return o.RetryMode, true, nil
+}
+
+func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) {
+ if o.DefaultsModeOptions.IMDSClient == nil {
+ return nil, false, nil
+ }
+ return o.DefaultsModeOptions.IMDSClient, true, nil
+}
+
+// getRegion returns Region from config's LoadOptions
+func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
+ if len(o.Region) == 0 {
+ return "", false, nil
+ }
+
+ return o.Region, true, nil
+}
+
+// getAppID returns AppID from config's LoadOptions
+func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
+ return o.AppID, len(o.AppID) > 0, nil
+}
+
+// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions
+func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+ if o.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *o.DisableRequestCompression, true, nil
+}
+
+// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions
+func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+ if o.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *o.RequestMinCompressSizeBytes, true, nil
+}
+
+func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) {
+ return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil
+}
+
+func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) {
+ return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil
+}
+
+func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) {
+ return o.ResponseChecksumValidation, o.ResponseChecksumValidation > 0, nil
+}
+
+func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) {
+ return o.BaseEndpoint, o.BaseEndpoint != "", nil
+}
+
+func (o LoadOptions) getServiceOptions(context.Context) ([]func(string, any), bool, error) {
+ return o.ServiceOptions, len(o.ServiceOptions) > 0, nil
+}
+
+// GetServiceBaseEndpoint satisfies (internal/configsources).ServiceBaseEndpointProvider.
+//
+// The sdkID value is unused because LoadOptions only supports setting a GLOBAL
+// endpoint override. In-code, per-service endpoint overrides are performed via
+// functional options in service client space.
+func (o LoadOptions) GetServiceBaseEndpoint(context.Context, string) (string, bool, error) {
+ return o.BaseEndpoint, o.BaseEndpoint != "", nil
+}
+
+// WithRegion is a helper function to construct functional options
+// that sets Region on config's LoadOptions. Setting the region to
+// an empty string will result in the region value being ignored.
+// If multiple WithRegion calls are made, the last call overrides
+// the previous call values.
+func WithRegion(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Region = v
+ return nil
+ }
+}
+
+// WithAppID is a helper function to construct functional options
+// that sets AppID on config's LoadOptions.
+func WithAppID(ID string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.AppID = ID
+ return nil
+ }
+}
+
+// WithDisableRequestCompression is a helper function to construct functional options
+// that sets DisableRequestCompression on config's LoadOptions.
+func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if DisableRequestCompression == nil {
+ return nil
+ }
+ o.DisableRequestCompression = DisableRequestCompression
+ return nil
+ }
+}
+
+// WithRequestMinCompressSizeBytes is a helper function to construct functional options
+// that sets RequestMinCompressSizeBytes on config's LoadOptions.
+func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if RequestMinCompressSizeBytes == nil {
+ return nil
+ }
+ o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes
+ return nil
+ }
+}
+
+// WithAccountIDEndpointMode is a helper function to construct functional options
+// that sets AccountIDEndpointMode on config's LoadOptions
+func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if m != "" {
+ o.AccountIDEndpointMode = m
+ }
+ return nil
+ }
+}
+
+// WithRequestChecksumCalculation is a helper function to construct functional options
+// that sets RequestChecksumCalculation on config's LoadOptions
+func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if c > 0 {
+ o.RequestChecksumCalculation = c
+ }
+ return nil
+ }
+}
+
+// WithResponseChecksumValidation is a helper function to construct functional options
+// that sets ResponseChecksumValidation on config's LoadOptions
+func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ResponseChecksumValidation = v
+ return nil
+ }
+}
+
+// getDefaultRegion returns DefaultRegion from config's LoadOptions
+func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
+ if len(o.DefaultRegion) == 0 {
+ return "", false, nil
+ }
+
+ return o.DefaultRegion, true, nil
+}
+
+// WithDefaultRegion is a helper function to construct functional options
+// that sets a DefaultRegion on config's LoadOptions. Setting the default
+// region to an empty string will result in the default region value
+// being ignored. If multiple WithDefaultRegion calls are made, the last
+// call overrides the previous call values. Note that both WithRegion and
+// WithEC2IMDSRegion calls take precedence over a WithDefaultRegion call
+// when resolving region.
+func WithDefaultRegion(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.DefaultRegion = v
+ return nil
+ }
+}
+
+// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions
+func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) {
+ if len(o.SharedConfigProfile) == 0 {
+ return "", false, nil
+ }
+
+ return o.SharedConfigProfile, true, nil
+}
+
+// WithSharedConfigProfile is a helper function to construct functional options
+// that sets SharedConfigProfile on config's LoadOptions. Setting the shared
+// config profile to an empty string will result in the shared config profile
+// value being ignored.
+// If multiple WithSharedConfigProfile calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigProfile(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SharedConfigProfile = v
+ return nil
+ }
+}
+
+// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions
+func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) {
+ if o.SharedConfigFiles == nil {
+ return nil, false, nil
+ }
+
+ return o.SharedConfigFiles, true, nil
+}
+
+// WithSharedConfigFiles is a helper function to construct functional options
+// that sets a slice of SharedConfigFiles on config's LoadOptions.
+// Setting the shared config files to a nil string slice will result in the
+// shared config files value being ignored.
+// If multiple WithSharedConfigFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedConfigFiles(v []string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SharedConfigFiles = v
+ return nil
+ }
+}
+
+// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions
+func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) {
+ if o.SharedCredentialsFiles == nil {
+ return nil, false, nil
+ }
+
+ return o.SharedCredentialsFiles, true, nil
+}
+
+// WithSharedCredentialsFiles is a helper function to construct functional options
+// that sets a slice of SharedCredentialsFiles on config's LoadOptions.
+// Setting the shared credentials files to a nil string slice will result in
+// the shared credentials files value being ignored.
+// If multiple WithSharedCredentialsFiles calls are made, the last call overrides
+// the previous call values.
+func WithSharedCredentialsFiles(v []string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SharedCredentialsFiles = v
+ return nil
+ }
+}
+
+// getCustomCABundle returns CustomCABundle from LoadOptions
+func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) {
+ if o.CustomCABundle == nil {
+ return nil, false, nil
+ }
+
+ return o.CustomCABundle, true, nil
+}
+
+// WithCustomCABundle is a helper function to construct functional options
+// that sets CustomCABundle on config's LoadOptions. Setting the custom CA bundle
+// to nil will result in the custom CA bundle value being ignored.
+// If multiple WithCustomCABundle calls are made, the last call overrides the
+// previous call values.
+func WithCustomCABundle(v io.Reader) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.CustomCABundle = v
+ return nil
+ }
+}
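+
+// Illustrative sketch: the reader is typically backed by PEM bytes loaded
+// from disk (the file path below is a placeholder):
+//
+//	pem, err := os.ReadFile("/etc/ssl/custom-ca.pem")
+//	if err == nil {
+//		cfg, err = LoadDefaultConfig(ctx, WithCustomCABundle(bytes.NewReader(pem)))
+//	}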
+
+// UseEC2IMDSRegion provides a regionProvider that retrieves the region
+// from the EC2 Metadata service.
+type UseEC2IMDSRegion struct {
+ // If unset will default to generic EC2 IMDS client.
+ Client *imds.Client
+}
+
+// getRegion attempts to retrieve the region from EC2 Metadata service.
+func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ client := p.Client
+ if client == nil {
+ client = imds.New(imds.Options{})
+ }
+
+ result, err := client.GetRegion(ctx, nil)
+ if err != nil {
+ return "", false, err
+ }
+ if len(result.Region) != 0 {
+ return result.Region, true, nil
+ }
+ return "", false, nil
+}
+
+// getEC2IMDSRegion returns the value of EC2 IMDS region.
+func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) {
+ if o.UseEC2IMDSRegion == nil {
+ return "", false, nil
+ }
+
+ return o.UseEC2IMDSRegion.getRegion(ctx)
+}
+
+// WithEC2IMDSRegion is a helper function to construct functional options
+// that enables resolving EC2IMDS region. The function takes
+// in a UseEC2IMDSRegion functional option, and can be used to set the
+// EC2IMDS client which will be used to resolve EC2IMDSRegion.
+// If no functional option is provided, an EC2IMDS client is built and used
+// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last
+// call overrides the previous call values. Note that a WithRegion call takes
+// precedence over WithEC2IMDSRegion when resolving region.
+func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.UseEC2IMDSRegion = &UseEC2IMDSRegion{}
+
+ for _, fn := range fnOpts {
+ fn(o.UseEC2IMDSRegion)
+ }
+ return nil
+ }
+}
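+
+// Illustrative sketch: resolving region from IMDS with an explicit client
+// (default client options shown are placeholders):
+//
+//	cfg, err := LoadDefaultConfig(ctx,
+//		WithEC2IMDSRegion(func(o *UseEC2IMDSRegion) {
+//			o.Client = imds.New(imds.Options{})
+//		}),
+//	)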
+
+// getCredentialsProvider returns the credentials value
+func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) {
+ if o.Credentials == nil {
+ return nil, false, nil
+ }
+
+ return o.Credentials, true, nil
+}
+
+// WithCredentialsProvider is a helper function to construct functional options
+// that sets Credential provider value on config's LoadOptions. If credentials
+// provider is set to nil, the credentials provider value will be ignored.
+// If multiple WithCredentialsProvider calls are made, the last call overrides
+// the previous call values.
+func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Credentials = v
+ return nil
+ }
+}
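+
+// Illustrative sketch: a fixed-credential provider such as
+// credentials.NewStaticCredentialsProvider from the SDK's credentials module
+// can be supplied directly (key values below are placeholders):
+//
+//	provider := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
+//	cfg, err := LoadDefaultConfig(ctx, WithCredentialsProvider(provider))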
+
+// getCredentialsCacheOptions returns the wrapped function to set aws.CredentialsCacheOptions
+func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) {
+ if o.CredentialsCacheOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.CredentialsCacheOptions, true, nil
+}
+
+// WithCredentialsCacheOptions is a helper function to construct functional
+// options that sets a function to modify the aws.CredentialsCacheOptions the
+// aws.CredentialsCache will be configured with, if the CredentialsCache is used
+// by the configuration loader.
+//
+// If multiple WithCredentialsCacheOptions calls are made, the last call
+// overrides the previous call values.
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.CredentialsCacheOptions = v
+ return nil
+ }
+}
+
+// getBearerAuthTokenProvider returns the credentials value
+func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
+ if o.BearerAuthTokenProvider == nil {
+ return nil, false, nil
+ }
+
+ return o.BearerAuthTokenProvider, true, nil
+}
+
+// WithBearerAuthTokenProvider is a helper function to construct functional options
+// that sets the bearer auth token provider on config's LoadOptions. If the
+// token provider is set to nil, the value will be ignored.
+// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BearerAuthTokenProvider = v
+ return nil
+ }
+}
+
+// getBearerAuthTokenCacheOptions returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
+ if o.BearerAuthTokenCacheOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.BearerAuthTokenCacheOptions, true, nil
+}
+
+// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
+// that sets a function to modify the TokenCacheOptions the smithy-go
+// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
+// the configuration loader.
+//
+// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BearerAuthTokenCacheOptions = v
+ return nil
+ }
+}
+
+// getSSOTokenProviderOptions returns the wrapped function to set ssocreds.SSOTokenProviderOptions
+func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
+ if o.SSOTokenProviderOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.SSOTokenProviderOptions, true, nil
+}
+
+// WithSSOTokenProviderOptions is a helper function to construct functional
+// options that sets a function to modify the SSOTokenProviderOptions the SDK's
+// credentials/ssocreds#SSOTokenProvider will be configured with, if the
+// SSOTokenProvider is used by the configuration loader.
+//
+// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SSOTokenProviderOptions = v
+ return nil
+ }
+}
+
+// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
+func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
+ if o.ProcessCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.ProcessCredentialOptions, true, nil
+}
+
+// WithProcessCredentialOptions is a helper function to construct functional options
+// that sets a function to use processcreds.Options on config's LoadOptions.
+// If process credential options is set to nil, the process credential value will
+// be ignored. If multiple WithProcessCredentialOptions calls are made, the last call
+// overrides the previous call values.
+func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ProcessCredentialOptions = v
+ return nil
+ }
+}
+
+// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options
+func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) {
+ if o.EC2RoleCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.EC2RoleCredentialOptions, true, nil
+}
+
+// WithEC2RoleCredentialOptions is a helper function to construct functional options
+// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If
+// EC2 role credential options is set to nil, the EC2 role credential options value
+// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made,
+// the last call overrides the previous call values.
+func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2RoleCredentialOptions = v
+ return nil
+ }
+}
+
+// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options
+func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) {
+ if o.EndpointCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointCredentialOptions, true, nil
+}
+
+// WithEndpointCredentialOptions is a helper function to construct functional options
+// that sets a function to use endpointcreds.Options on config's LoadOptions. If
+// endpoint credential options is set to nil, the endpoint credential options
+// value will be ignored. If multiple WithEndpointCredentialOptions calls are made,
+// the last call overrides the previous call values.
+func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointCredentialOptions = v
+ return nil
+ }
+}
+
+// getWebIdentityRoleCredentialOptions returns the wrapped function
+func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) {
+ if o.WebIdentityRoleCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.WebIdentityRoleCredentialOptions, true, nil
+}
+
+// WithWebIdentityRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.WebIdentityRoleOptions
+// on config's LoadOptions. If web identity role credentials options is set to nil,
+// the web identity role credentials value will be ignored. If multiple
+// WithWebIdentityRoleCredentialOptions calls are made, the last call
+// overrides the previous call values.
+func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.WebIdentityRoleCredentialOptions = v
+ return nil
+ }
+}
+
+// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions
+func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) {
+ if o.AssumeRoleCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.AssumeRoleCredentialOptions, true, nil
+}
+
+// WithAssumeRoleCredentialOptions is a helper function to construct
+// functional options that sets a function to use stscreds.AssumeRoleOptions
+// on config's LoadOptions. If assume role credentials options is set to nil,
+// the assume role credentials value will be ignored. If multiple
+// WithAssumeRoleCredentialOptions calls are made, the last call overrides
+// the previous call values.
+func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.AssumeRoleCredentialOptions = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) {
+ if o.HTTPClient == nil {
+ return nil, false, nil
+ }
+
+ return o.HTTPClient, true, nil
+}
+
+// WithHTTPClient is a helper function to construct functional options
+// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil,
+// the HTTPClient value will be ignored.
+// If multiple WithHTTPClient calls are made, the last call overrides
+// the previous call values.
+func WithHTTPClient(v HTTPClient) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.HTTPClient = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) {
+ if o.APIOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.APIOptions, true, nil
+}
+
+// WithAPIOptions is a helper function to construct functional options
+// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the
+// APIOptions value is ignored. If multiple WithAPIOptions calls are
+// made, the last call overrides the previous call values.
+func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if v == nil {
+ return nil
+ }
+
+ o.APIOptions = append(o.APIOptions, v...)
+ return nil
+ }
+}
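+
+// Illustrative sketch: APIOptions append across calls, so middleware hooks
+// can be registered incrementally (the no-op mutation below is a placeholder):
+//
+//	cfg, err := LoadDefaultConfig(ctx, WithAPIOptions([]func(*middleware.Stack) error{
+//		func(s *middleware.Stack) error { return nil },
+//	}))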
+
+func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+ if o.RetryMaxAttempts == 0 {
+ return 0, false, nil
+ }
+
+ return o.RetryMaxAttempts, true, nil
+}
+
+// WithRetryMaxAttempts is a helper function to construct functional options that sets
+// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is
+// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMaxAttempts(v int) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.RetryMaxAttempts = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+ if o.RetryMode == "" {
+ return "", false, nil
+ }
+
+ return o.RetryMode, true, nil
+}
+
+// WithRetryMode is a helper function to construct functional options that sets
+// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is
+// ignored. If multiple WithRetryMode calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.RetryMode = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
+ if o.Retryer == nil {
+ return nil, false, nil
+ }
+
+ return o.Retryer, true, nil
+}
+
+// WithRetryer is a helper function to construct functional options
+// that sets Retryer on LoadOptions. If Retryer is set to nil, the
+// Retryer value is ignored. If multiple WithRetryer calls are
+// made, the last call overrides the previous call values.
+func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Retryer = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
+ if o.EndpointResolver == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointResolver, true, nil
+}
+
+// WithEndpointResolver is a helper function to construct functional options
+// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. The API
+// for endpoint resolution is now unique to each service and is set via the
+// EndpointResolverV2 field on service client options. Use of
+// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you
+// from using any endpoint-related service features released after the
+// introduction of EndpointResolverV2. You may also encounter broken or
+// unexpected behavior when using the old global interface with services that
+// use many endpoint-related customizations such as S3.
+func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointResolver = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
+ if o.EndpointResolverWithOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointResolverWithOptions, true, nil
+}
+
+// WithEndpointResolverWithOptions is a helper function to construct functional options
+// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil,
+// the EndpointResolverWithOptions value is ignored. If multiple WithEndpointResolverWithOptions calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [WithEndpointResolver].
+func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointResolverWithOptions = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) {
+ if o.Logger == nil {
+ return nil, false, nil
+ }
+
+ return o.Logger, true, nil
+}
+
+// WithLogger is a helper function to construct functional options
+// that sets Logger on LoadOptions. If Logger is set to nil, the
+// Logger value will be ignored. If multiple WithLogger calls are made,
+// the last call overrides the previous call values.
+func WithLogger(v logging.Logger) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Logger = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) {
+ if o.ClientLogMode == nil {
+ return 0, false, nil
+ }
+
+ return *o.ClientLogMode, true, nil
+}
+
+// WithClientLogMode is a helper function to construct functional options
+// that sets the client log mode on LoadOptions. If multiple WithClientLogMode
+// calls are made, the last call overrides the previous call values.
+func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ClientLogMode = &v
+ return nil
+ }
+}
+
+func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) {
+ if o.LogConfigurationWarnings == nil {
+ return false, false, nil
+ }
+ return *o.LogConfigurationWarnings, true, nil
+}
+
+// WithLogConfigurationWarnings is a helper function to construct
+// functional options that can be used to set LogConfigurationWarnings
+// on LoadOptions.
+//
+// If multiple WithLogConfigurationWarnings calls are made, the last call
+// overrides the previous call values.
+func WithLogConfigurationWarnings(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.LogConfigurationWarnings = &v
+ return nil
+ }
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) {
+ if o.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+ return *o.S3UseARNRegion, true, nil
+}
+
+// WithS3UseARNRegion is a helper function to construct functional options
+// that can be used to set S3UseARNRegion on LoadOptions.
+// If multiple WithS3UseARNRegion calls are made, the last call overrides
+// the previous call values.
+func WithS3UseARNRegion(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.S3UseARNRegion = &v
+ return nil
+ }
+}
+
+// GetS3DisableMultiRegionAccessPoints returns whether to disable
+// the S3 multi-region access points feature.
+func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) {
+ if o.S3DisableMultiRegionAccessPoints == nil {
+ return false, false, nil
+ }
+ return *o.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options
+// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions.
+// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides
+// the previous call values.
+func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.S3DisableMultiRegionAccessPoints = &v
+ return nil
+ }
+}
+
+// GetEnableEndpointDiscovery returns whether the EnableEndpointDiscovery flag is set.
+func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+ if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+ return aws.EndpointDiscoveryUnset, false, nil
+ }
+ return o.EnableEndpointDiscovery, true, nil
+}
+
+// WithEndpointDiscovery is a helper function to construct functional options
+// that can be used to enable endpoint discovery on LoadOptions for supported clients.
+// If multiple WithEndpointDiscovery calls are made, the last call overrides
+// the previous call values.
+func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EnableEndpointDiscovery = v
+ return nil
+ }
+}
+
+// getSSOProviderOptions returns the SSO credential provider options from LoadOptions.
+func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) {
+ if o.SSOProviderOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.SSOProviderOptions, true, nil
+}
+
+// WithSSOProviderOptions is a helper function to construct
+// functional options that sets a function to use ssocreds.Options
+// on config's LoadOptions. If the SSO credential provider options are set
+// to nil, the SSO provider options value will be ignored. If multiple
+// WithSSOProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SSOProviderOptions = v
+ return nil
+ }
+}
+
+// GetEC2IMDSClientEnableState implements an EC2IMDSClientEnableState option resolver interface.
+func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+ if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+ return imds.ClientDefaultEnableState, false, nil
+ }
+
+ return o.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+ if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+ return imds.EndpointModeStateUnset, false, nil
+ }
+
+ return o.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) {
+ if len(o.EC2IMDSEndpoint) == 0 {
+ return "", false, nil
+ }
+
+ return o.EC2IMDSEndpoint, true, nil
+}
+
+// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState.
+func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2IMDSClientEnableState = v
+ return nil
+ }
+}
+
+// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode.
+func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2IMDSEndpointMode = v
+ return nil
+ }
+}
+
+// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint.
+func WithEC2IMDSEndpoint(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2IMDSEndpoint = v
+ return nil
+ }
+}
+
+// WithUseDualStackEndpoint is a helper function to construct
+// functional options that can be used to set UseDualStackEndpoint on LoadOptions.
+func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.UseDualStackEndpoint = v
+ return nil
+ }
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+ if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+ return aws.DualStackEndpointStateUnset, false, nil
+ }
+ return o.UseDualStackEndpoint, true, nil
+}
+
+// WithUseFIPSEndpoint is a helper function to construct
+// functional options that can be used to set UseFIPSEndpoint on LoadOptions.
+func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.UseFIPSEndpoint = v
+ return nil
+ }
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+ if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+ return aws.FIPSEndpointStateUnset, false, nil
+ }
+ return o.UseFIPSEndpoint, true, nil
+}
+
+// WithDefaultsMode sets the SDK defaults configuration mode to the value provided.
+//
+// Zero or more functional options can be provided to provide configuration options for performing
+// environment discovery when using aws.DefaultsModeAuto.
+func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc {
+ do := DefaultsModeOptions{
+ Mode: mode,
+ }
+ for _, fn := range optFns {
+ fn(&do)
+ }
+ return func(options *LoadOptions) error {
+ options.DefaultsModeOptions = do
+ return nil
+ }
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [LoadOptions.S3DisableExpressAuth].
+func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) {
+ if o.S3DisableExpressAuth == nil {
+ return false, false
+ }
+
+ return *o.S3DisableExpressAuth, true
+}
+
+// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth]
+// to the value provided.
+func WithS3DisableExpressAuth(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.S3DisableExpressAuth = &v
+ return nil
+ }
+}
+
+// WithBaseEndpoint is a helper function to construct functional options that
+// sets BaseEndpoint on config's LoadOptions. Empty values have no effect, and
+// subsequent calls to this API override previous ones.
+//
+// This is an in-code setting; therefore, any value set using this hook takes
+// precedence over and will override ALL environment and shared config
+// directives that set endpoint URLs. Functional options on service clients
+// have higher specificity, and functional options that modify the value of
+// BaseEndpoint on a client will take precedence over this setting.
+func WithBaseEndpoint(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BaseEndpoint = v
+ return nil
+ }
+}
+
+// WithServiceOptions is a helper function to construct functional options
+// that appends to ServiceOptions on config's LoadOptions.
+func WithServiceOptions(callbacks ...func(string, any)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ServiceOptions = append(o.ServiceOptions, callbacks...)
+ return nil
+ }
+}
+
+// WithBeforeExecution adds the BeforeExecutionInterceptor to config.
+func WithBeforeExecution(i smithyhttp.BeforeExecutionInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeExecution = append(o.Interceptors.BeforeExecution, i)
+ return nil
+ }
+}
+
+// WithBeforeSerialization adds the BeforeSerializationInterceptor to config.
+func WithBeforeSerialization(i smithyhttp.BeforeSerializationInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeSerialization = append(o.Interceptors.BeforeSerialization, i)
+ return nil
+ }
+}
+
+// WithAfterSerialization adds the AfterSerializationInterceptor to config.
+func WithAfterSerialization(i smithyhttp.AfterSerializationInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.AfterSerialization = append(o.Interceptors.AfterSerialization, i)
+ return nil
+ }
+}
+
+// WithBeforeRetryLoop adds the BeforeRetryLoopInterceptor to config.
+func WithBeforeRetryLoop(i smithyhttp.BeforeRetryLoopInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeRetryLoop = append(o.Interceptors.BeforeRetryLoop, i)
+ return nil
+ }
+}
+
+// WithBeforeAttempt adds the BeforeAttemptInterceptor to config.
+func WithBeforeAttempt(i smithyhttp.BeforeAttemptInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeAttempt = append(o.Interceptors.BeforeAttempt, i)
+ return nil
+ }
+}
+
+// WithBeforeSigning adds the BeforeSigningInterceptor to config.
+func WithBeforeSigning(i smithyhttp.BeforeSigningInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeSigning = append(o.Interceptors.BeforeSigning, i)
+ return nil
+ }
+}
+
+// WithAfterSigning adds the AfterSigningInterceptor to config.
+func WithAfterSigning(i smithyhttp.AfterSigningInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.AfterSigning = append(o.Interceptors.AfterSigning, i)
+ return nil
+ }
+}
+
+// WithBeforeTransmit adds the BeforeTransmitInterceptor to config.
+func WithBeforeTransmit(i smithyhttp.BeforeTransmitInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeTransmit = append(o.Interceptors.BeforeTransmit, i)
+ return nil
+ }
+}
+
+// WithAfterTransmit adds the AfterTransmitInterceptor to config.
+func WithAfterTransmit(i smithyhttp.AfterTransmitInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.AfterTransmit = append(o.Interceptors.AfterTransmit, i)
+ return nil
+ }
+}
+
+// WithBeforeDeserialization adds the BeforeDeserializationInterceptor to config.
+func WithBeforeDeserialization(i smithyhttp.BeforeDeserializationInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.BeforeDeserialization = append(o.Interceptors.BeforeDeserialization, i)
+ return nil
+ }
+}
+
+// WithAfterDeserialization adds the AfterDeserializationInterceptor to config.
+func WithAfterDeserialization(i smithyhttp.AfterDeserializationInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.AfterDeserialization = append(o.Interceptors.AfterDeserialization, i)
+ return nil
+ }
+}
+
+// WithAfterAttempt adds the AfterAttemptInterceptor to config.
+func WithAfterAttempt(i smithyhttp.AfterAttemptInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.AfterAttempt = append(o.Interceptors.AfterAttempt, i)
+ return nil
+ }
+}
+
+// WithAfterExecution adds the AfterExecutionInterceptor to config.
+func WithAfterExecution(i smithyhttp.AfterExecutionInterceptor) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Interceptors.AfterExecution = append(o.Interceptors.AfterExecution, i)
+ return nil
+ }
+}
+
+// WithAuthSchemePreference sets the priority order of auth schemes on config.
+//
+// Schemes are expressed as names, e.g. sigv4a or sigv4.
+func WithAuthSchemePreference(schemeIDs ...string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.AuthSchemePreference = schemeIDs
+ return nil
+ }
+}
+
+func (o LoadOptions) getAuthSchemePreference() ([]string, bool) {
+ if len(o.AuthSchemePreference) > 0 {
+ return o.AuthSchemePreference, true
+ }
+ return nil, false
+}
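
As a usage illustration (not part of the vendored diff): the options above are consumed by config.LoadDefaultConfig, which applies each LoadOptionsFunc in order. A minimal sketch, assuming config.WithRegion (defined elsewhere in this package) alongside the helpers shown above:

	package main

	import (
		"context"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
	)

	func main() {
		// Later options of the same kind override earlier ones, per the
		// doc comments above.
		cfg, err := config.LoadDefaultConfig(context.TODO(),
			config.WithRegion("us-east-1"),
			config.WithClientLogMode(aws.LogRetries|aws.LogRequest),
			config.WithS3UseARNRegion(true),
		)
		if err != nil {
			log.Fatal(err)
		}
		_ = cfg // hand cfg to service client constructors
	}
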
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
new file mode 100644
index 000000000..b629137c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
@@ -0,0 +1,51 @@
+package config
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ if len(addrs) == 0 {
+ return false, fmt.Errorf("no addrs found for host, %s", host)
+ }
+
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func validateLocalURL(v string) error {
+ u, err := url.Parse(v)
+ if err != nil {
+ return err
+ }
+
+ host := u.Hostname()
+ if len(host) == 0 {
+ return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+ } else if isLoopback, err := isLoopbackHost(host); err != nil {
+ return fmt.Errorf("failed to resolve host %q, %v", host, err)
+ } else if !isLoopback {
+		return fmt.Errorf("invalid endpoint host, %q, only hosts resolving to loopback addresses are allowed", host)
+ }
+
+ return nil
+}
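
A sketch of the behavior validateLocalURL enforces, written as a package-internal test; the URLs are illustrative assumptions, chosen so no DNS lookup is needed:

	package config

	import "testing"

	func TestValidateLocalURLSketch(t *testing.T) {
		// 127.0.0.1 parses as an IP and is loopback, so it validates.
		if err := validateLocalURL("http://127.0.0.1:8080/creds"); err != nil {
			t.Errorf("expected loopback host to validate, got %v", err)
		}
		// 169.254.170.2 parses as an IP but is link-local, not loopback,
		// so it is rejected.
		if err := validateLocalURL("http://169.254.170.2/creds"); err == nil {
			t.Error("expected non-loopback host to be rejected")
		}
	}
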
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
new file mode 100644
index 000000000..18b9b5ad2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -0,0 +1,786 @@
+package config
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// sharedConfigProfileProvider provides access to the shared config profile
+// name external configuration value.
+type sharedConfigProfileProvider interface {
+ getSharedConfigProfile(ctx context.Context) (string, bool, error)
+}
+
+// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(sharedConfigProfileProvider); ok {
+ value, found, err = p.getSharedConfigProfile(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// sharedConfigFilesProvider provides access to the shared config filenames
+// external configuration value.
+type sharedConfigFilesProvider interface {
+ getSharedConfigFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(sharedConfigFilesProvider); ok {
+ value, found, err = p.getSharedConfigFiles(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+// sharedCredentialsFilesProvider provides access to the shared credentials filenames
+// external configuration value.
+type sharedCredentialsFilesProvider interface {
+ getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error)
+}
+
+// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(sharedCredentialsFilesProvider); ok {
+ value, found, err = p.getSharedCredentialsFiles(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+// customCABundleProvider provides access to the custom CA bundle PEM bytes.
+type customCABundleProvider interface {
+ getCustomCABundle(ctx context.Context) (io.Reader, bool, error)
+}
+
+// getCustomCABundle searches the configs for a customCABundleProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(customCABundleProvider); ok {
+ value, found, err = p.getCustomCABundle(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+
+ return
+}
+
+// regionProvider provides access to the region external configuration value.
+type regionProvider interface {
+ getRegion(ctx context.Context) (string, bool, error)
+}
+
+// getRegion searches the configs for a regionProvider and returns the value
+// if found. Returns an error if a provider fails before a value is found.
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(regionProvider); ok {
+ value, found, err = p.getRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// IgnoreConfiguredEndpointsProvider is used to search for providers that
+// expose a flag to disable configured endpoints.
+type IgnoreConfiguredEndpointsProvider interface {
+ GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used to determine whether the configured
+// endpoints feature should be disabled.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+ value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+type baseEndpointProvider interface {
+ getBaseEndpoint(ctx context.Context) (string, bool, error)
+}
+
+func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(baseEndpointProvider); ok {
+ value, found, err = p.getBaseEndpoint(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+type servicesObjectProvider interface {
+ getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error)
+}
+
+func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(servicesObjectProvider); ok {
+ value, found, err = p.getServicesObject(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// appIDProvider provides access to the SDK app ID value
+type appIDProvider interface {
+ getAppID(ctx context.Context) (string, bool, error)
+}
+
+func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(appIDProvider); ok {
+ value, found, err = p.getAppID(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// disableRequestCompressionProvider provides access to the DisableRequestCompression
+type disableRequestCompressionProvider interface {
+ getDisableRequestCompression(context.Context) (bool, bool, error)
+}
+
+func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(disableRequestCompressionProvider); ok {
+ value, found, err = p.getDisableRequestCompression(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes
+type requestMinCompressSizeBytesProvider interface {
+ getRequestMinCompressSizeBytes(context.Context) (int64, bool, error)
+}
+
+func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok {
+ value, found, err = p.getRequestMinCompressSizeBytes(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode
+type accountIDEndpointModeProvider interface {
+ getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error)
+}
+
+func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(accountIDEndpointModeProvider); ok {
+ value, found, err = p.getAccountIDEndpointMode(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// requestChecksumCalculationProvider provides access to the RequestChecksumCalculation
+type requestChecksumCalculationProvider interface {
+ getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error)
+}
+
+func getRequestChecksumCalculation(ctx context.Context, configs configs) (value aws.RequestChecksumCalculation, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(requestChecksumCalculationProvider); ok {
+ value, found, err = p.getRequestChecksumCalculation(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// responseChecksumValidationProvider provides access to the ResponseChecksumValidation
+type responseChecksumValidationProvider interface {
+ getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error)
+}
+
+func getResponseChecksumValidation(ctx context.Context, configs configs) (value aws.ResponseChecksumValidation, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(responseChecksumValidationProvider); ok {
+ value, found, err = p.getResponseChecksumValidation(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ec2IMDSRegionProvider provides access to the ec2 imds region
+// configuration value
+type ec2IMDSRegionProvider interface {
+ getEC2IMDSRegion(ctx context.Context) (string, bool, error)
+}
+
+// getEC2IMDSRegion searches the configs for a ec2IMDSRegionProvider and
+// returns the value if found. Returns an error if a provider fails before
+// a value is found.
+func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) {
+ for _, cfg := range configs {
+ if provider, ok := cfg.(ec2IMDSRegionProvider); ok {
+ region, found, err = provider.getEC2IMDSRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// credentialsProviderProvider provides access to the credentials external
+// configuration value.
+type credentialsProviderProvider interface {
+ getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error)
+}
+
+// getCredentialsProvider searches the configs for a credentialsProviderProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
+func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) {
+ for _, cfg := range configs {
+ if provider, ok := cfg.(credentialsProviderProvider); ok {
+ p, found, err = provider.getCredentialsProvider(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// credentialsCacheOptionsProvider is an interface for retrieving a function for setting
+// the aws.CredentialsCacheOptions.
+type credentialsCacheOptionsProvider interface {
+ getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error)
+}
+
+// getCredentialsCacheOptionsProvider searches the config sources for a
+// credentialsCacheOptionsProvider and returns the function if found. Returns
+// an error if a provider fails before a value is found.
+func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
+ f func(*aws.CredentialsCacheOptions), found bool, err error,
+) {
+ for _, config := range configs {
+ if p, ok := config.(credentialsCacheOptionsProvider); ok {
+ f, found, err = p.getCredentialsCacheOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// bearerAuthTokenProviderProvider provides access to the bearer authentication
+// token external configuration value.
+type bearerAuthTokenProviderProvider interface {
+ getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error)
+}
+
+// getBearerAuthTokenProvider searches the config sources for a
+// bearerAuthTokenProviderProvider and returns the value if found. Returns an
+// error if a provider fails before a value is found.
+func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) {
+ for _, cfg := range configs {
+ if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok {
+ p, found, err = provider.getBearerAuthTokenProvider(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+type bearerAuthTokenCacheOptionsProvider interface {
+ getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
+}
+
+// getBearerAuthTokenCacheOptions searches the config sources for a
+// bearerAuthTokenCacheOptionsProvider and returns the function if found.
+func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
+ f func(*smithybearer.TokenCacheOptions), found bool, err error,
+) {
+ for _, config := range configs {
+ if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
+ f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+type ssoTokenProviderOptionsProvider interface {
+ getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
+}
+
+// getSSOTokenProviderOptions searches the config sources for an
+// ssoTokenProviderOptionsProvider and returns the function if found.
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
+ f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
+) {
+ for _, config := range configs {
+ if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
+ f, found, err = p.getSSOTokenProviderOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+ getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(processCredentialOptions); ok {
+ f, found, err = p.getProcessCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Provider options.
+type ec2RoleCredentialOptionsProvider interface {
+ getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error)
+}
+
+// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(ec2RoleCredentialOptionsProvider); ok {
+ f, found, err = p.getEC2RoleCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources
+type defaultRegionProvider interface {
+ getDefaultRegion(ctx context.Context) (string, bool, error)
+}
+
+// getDefaultRegion searches the slice of configs and returns the first fallback region found
+func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(defaultRegionProvider); ok {
+ value, found, err = p.getDefaultRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// endpointCredentialOptionsProvider is an interface for retrieving a function for setting
+// the endpointcreds.ProviderOptions.
+type endpointCredentialOptionsProvider interface {
+ getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error)
+}
+
+// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(endpointCredentialOptionsProvider); ok {
+ f, found, err = p.getEndpointCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.WebIdentityRoleProvider.
+type webIdentityRoleCredentialOptionsProvider interface {
+ getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error)
+}
+
+// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found
+func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok {
+ f, found, err = p.getWebIdentityRoleCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.AssumeRoleOptions.
+type assumeRoleCredentialOptionsProvider interface {
+ getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error)
+}
+
+// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(assumeRoleCredentialOptionsProvider); ok {
+ f, found, err = p.getAssumeRoleCredentialOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// HTTPClient is an HTTP client implementation
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// httpClientProvider is an interface for retrieving HTTPClient
+type httpClientProvider interface {
+ getHTTPClient(ctx context.Context) (HTTPClient, bool, error)
+}
+
+// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs
+func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(httpClientProvider); ok {
+ client, found, err = p.getHTTPClient(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// apiOptionsProvider is an interface for retrieving APIOptions
+type apiOptionsProvider interface {
+ getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error)
+}
+
+// getAPIOptions searches the slice of configs and returns the APIOptions set on configs
+func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) {
+ for _, config := range configs {
+ if p, ok := config.(apiOptionsProvider); ok {
+			// retrieve APIOptions from the config source, if present
+ apiOptions, found, err = p.getAPIOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source
+type endpointResolverProvider interface {
+ getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error)
+}
+
+// getEndpointResolver searches the provided config sources for an aws.EndpointResolver that can be used
+// to configure the aws.Config.EndpointResolver value.
+func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(endpointResolverProvider); ok {
+ f, found, err = p.getEndpointResolver(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source
+type endpointResolverWithOptionsProvider interface {
+ getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error)
+}
+
+// getEndpointResolverWithOptions searches the provided config sources for an aws.EndpointResolverWithOptions
+// that can be used to configure the aws.Config.EndpointResolverWithOptions value.
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(endpointResolverWithOptionsProvider); ok {
+ f, found, err = p.getEndpointResolverWithOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
+type loggerProvider interface {
+ getLogger(ctx context.Context) (logging.Logger, bool, error)
+}
+
+// getLogger searches the provided config sources for a logging.Logger that can be used
+// to configure the aws.Config.Logger value.
+func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(loggerProvider); ok {
+ l, found, err = p.getLogger(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
+type clientLogModeProvider interface {
+ getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
+}
+
+func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(clientLogModeProvider); ok {
+ m, found, err = p.getClientLogMode(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// retryProvider is a configuration provider for a custom Retryer.
+type retryProvider interface {
+ getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
+}
+
+func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(retryProvider); ok {
+ v, found, err = p.getRetryer(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// logConfigurationWarningsProvider is a configuration provider for
+// retrieving a boolean indicating whether configuration issues should
+// be logged when loading from config sources.
+type logConfigurationWarningsProvider interface {
+ getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
+}
+
+func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(logConfigurationWarningsProvider); ok {
+ v, found, err = p.getLogConfigurationWarnings(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
+// the ssocreds.Options.
+type ssoCredentialOptionsProvider interface {
+ getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error)
+}
+
+func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(ssoCredentialOptionsProvider); ok {
+ v, found, err = p.getSSOProviderOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return v, found, err
+}
+
+type defaultsModeIMDSClientProvider interface {
+ getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error)
+}
+
+func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(defaultsModeIMDSClientProvider); ok {
+ v, found, err = p.getDefaultsModeIMDSClient(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return v, found, err
+}
+
+type defaultsModeProvider interface {
+ getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error)
+}
+
+func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(defaultsModeProvider); ok {
+ v, found, err = p.getDefaultsMode(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return v, found, err
+}
+
+type retryMaxAttemptsProvider interface {
+ GetRetryMaxAttempts(context.Context) (int, bool, error)
+}
+
+func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(retryMaxAttemptsProvider); ok {
+ v, found, err = p.GetRetryMaxAttempts(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return v, found, err
+}
+
+type retryModeProvider interface {
+ GetRetryMode(context.Context) (aws.RetryMode, bool, error)
+}
+
+func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(retryModeProvider); ok {
+ v, found, err = p.GetRetryMode(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return v, found, err
+}
+
+func getAuthSchemePreference(ctx context.Context, configs configs) ([]string, bool) {
+ type provider interface {
+ getAuthSchemePreference() ([]string, bool)
+ }
+
+ for _, cfg := range configs {
+ if p, ok := cfg.(provider); ok {
+ if v, ok := p.getAuthSchemePreference(); ok {
+ return v, true
+ }
+ }
+ }
+ return nil, false
+}
+
+type serviceOptionsProvider interface {
+ getServiceOptions(ctx context.Context) ([]func(string, any), bool, error)
+}
+
+func getServiceOptions(ctx context.Context, configs configs) (v []func(string, any), found bool, err error) {
+ for _, c := range configs {
+ if p, ok := c.(serviceOptionsProvider); ok {
+ v, found, err = p.getServiceOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return v, found, err
+}
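
Every getter in this file follows the same shape: walk the config sources, type-assert each against a one-method provider interface, and stop at the first source that reports found or returns an error. A standalone sketch of that pattern, with a made-up fruitProvider interface standing in for the real providers:

	package main

	import "fmt"

	type fruitProvider interface {
		getFruit() (string, bool, error)
	}

	type basket struct{ fruit string }

	func (b basket) getFruit() (string, bool, error) {
		return b.fruit, b.fruit != "", nil
	}

	// getFruit mirrors getRegion et al.: the first found value (or the
	// first error) wins; later sources are never consulted.
	func getFruit(sources []interface{}) (value string, found bool, err error) {
		for _, s := range sources {
			if p, ok := s.(fruitProvider); ok {
				value, found, err = p.getFruit()
				if err != nil || found {
					break
				}
			}
		}
		return
	}

	func main() {
		fmt.Println(getFruit([]interface{}{basket{}, basket{"apple"}, basket{"pear"}}))
		// Output: apple true <nil>
	}
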
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
new file mode 100644
index 000000000..92a16d718
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -0,0 +1,444 @@
+package config
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/smithy-go/logging"
+)
+
+// resolveDefaultAWSConfig will write default configuration values into the cfg
+// value. It will write the default values, overwriting any previous value.
+//
+// This should be used as the first resolver in the slice of resolvers when
+// resolving external configuration.
+func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+ var sources []interface{}
+ for _, s := range cfgs {
+ sources = append(sources, s)
+ }
+
+ *cfg = aws.Config{
+ Logger: logging.NewStandardLogger(os.Stderr),
+ ConfigSources: sources,
+ }
+ return nil
+}
+
+// resolveCustomCABundle extracts the first instance of custom CA bundle PEM bytes
+// from the external configurations. It will update the HTTP Client's builder
+// to be configured with the custom CA bundle.
+//
+// Config provider used:
+// * customCABundleProvider
+func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+ pemCerts, found, err := getCustomCABundle(ctx, cfgs)
+ if err != nil {
+		// TODO: decide on error handling; capture previous errors and
+		// continue, erroring out only if all providers fail.
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ if cfg.HTTPClient == nil {
+ cfg.HTTPClient = awshttp.NewBuildableClient()
+ }
+
+ trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return fmt.Errorf("unable to add custom RootCAs HTTPClient, "+
+ "has no WithTransportOptions, %T", cfg.HTTPClient)
+ }
+
+ var appendErr error
+ client := trOpts.WithTransportOptions(func(tr *http.Transport) {
+ if tr.TLSClientConfig == nil {
+ tr.TLSClientConfig = &tls.Config{}
+ }
+ if tr.TLSClientConfig.RootCAs == nil {
+ tr.TLSClientConfig.RootCAs = x509.NewCertPool()
+ }
+
+ b, err := ioutil.ReadAll(pemCerts)
+ if err != nil {
+ appendErr = fmt.Errorf("failed to read custom CA bundle PEM file")
+ }
+
+ if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) {
+ appendErr = fmt.Errorf("failed to load custom CA bundle PEM file")
+ }
+ })
+ if appendErr != nil {
+ return appendErr
+ }
+
+ cfg.HTTPClient = client
+ return err
+}
+
+// resolveRegion extracts the first instance of a Region from the configs slice.
+//
+// Config providers used:
+// * regionProvider
+func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+ v, found, err := getRegion(ctx, configs)
+ if err != nil {
+		// TODO: decide on error handling; capture previous errors and
+		// continue, erroring out only if all providers fail.
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Region = v
+ return nil
+}
+
+func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error {
+ var downcastCfgSources []interface{}
+ for _, cs := range configs {
+ downcastCfgSources = append(downcastCfgSources, interface{}(cs))
+ }
+
+ if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil {
+ cfg.BaseEndpoint = nil
+ return nil
+ }
+
+ v, found, err := getBaseEndpoint(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ return nil
+ }
+ cfg.BaseEndpoint = aws.String(v)
+ return nil
+}
+
+// resolveAppID extracts the SDK app ID from the configs slice's SharedConfig or env var
+func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error {
+ ID, _, err := getAppID(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ cfg.AppID = ID
+ return nil
+}
+
+// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's
+// SharedConfig or EnvConfig
+func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error {
+ disable, _, err := getDisableRequestCompression(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ cfg.DisableRequestCompression = disable
+ return nil
+}
+
+// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's
+// SharedConfig or EnvConfig
+func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error {
+ minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs)
+ if err != nil {
+ return err
+ }
+ // must set a default min size 10240 if not configured
+ if !found {
+ minBytes = 10240
+ }
+ cfg.RequestMinCompressSizeBytes = minBytes
+ return nil
+}
+
+// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's
+// SharedConfig or EnvConfig
+func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+ m, found, err := getAccountIDEndpointMode(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ m = aws.AccountIDEndpointModePreferred
+ }
+
+ cfg.AccountIDEndpointMode = m
+ return nil
+}
+
+// resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's
+// SharedConfig or EnvConfig
+func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error {
+ c, found, err := getRequestChecksumCalculation(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ c = aws.RequestChecksumCalculationWhenSupported
+ }
+ cfg.RequestChecksumCalculation = c
+ return nil
+}
+
+// resolveResponseValidation extracts the ResponseChecksumValidation from the configs slice's
+// SharedConfig or EnvConfig
+func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, configs configs) error {
+ c, found, err := getResponseChecksumValidation(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ c = aws.ResponseChecksumValidationWhenSupported
+ }
+ cfg.ResponseChecksumValidation = c
+ return nil
+}
+
+// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
+// region if region had not been resolved from other sources.
+func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+ if len(cfg.Region) > 0 {
+ return nil
+ }
+
+ v, found, err := getDefaultRegion(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Region = v
+
+ return nil
+}
+
+// resolveHTTPClient extracts the first instance of an HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance
+// if one has not been resolved from other sources.
+func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error {
+ c, found, err := getHTTPClient(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.HTTPClient = c
+ return nil
+}
+
+// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options
+// if one has not been resolved from other sources.
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+ o, found, err := getAPIOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.APIOptions = o
+
+ return nil
+}
+
+// resolveEndpointResolver extracts the first instance of an aws.EndpointResolver from the config slice
+// and sets it on the aws.Config.EndpointResolver value.
+func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
+ endpointResolver, found, err := getEndpointResolver(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.EndpointResolver = endpointResolver
+
+ return nil
+}
+
+// resolveEndpointResolverWithOptions extracts the first instance of an aws.EndpointResolverWithOptions
+// from the config slice and sets it on the aws.Config.EndpointResolverWithOptions value.
+func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+ endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.EndpointResolverWithOptions = endpointResolver
+
+ return nil
+}
+
+func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
+ logger, found, err := getLogger(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Logger = logger
+
+ return nil
+}
+
+func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+ mode, found, err := getClientLogMode(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.ClientLogMode = mode
+
+ return nil
+}
+
+func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
+ retryer, found, err := getRetryer(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if found {
+ cfg.Retryer = retryer
+ return nil
+ }
+
+	// Only load the retry options if a custom retryer has not been specified.
+ if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil {
+ return err
+ }
+ return resolveRetryMode(ctx, cfg, configs)
+}
+
+func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+ if len(cfg.Region) > 0 {
+ return nil
+ }
+
+ region, found, err := getEC2IMDSRegion(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.Region = region
+
+ return nil
+}
+
+func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+ defaultsMode, found, err := getDefaultsMode(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ defaultsMode = aws.DefaultsModeLegacy
+ }
+
+ var environment aws.RuntimeEnvironment
+ if defaultsMode == aws.DefaultsModeAuto {
+ envConfig, _, _ := getAWSConfigSources(configs)
+
+ client, found, err := getDefaultsModeIMDSClient(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ client = imds.NewFromConfig(*cfg)
+ }
+
+ environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client)
+ if err != nil {
+ return err
+ }
+ }
+
+ cfg.DefaultsMode = defaultsMode
+ cfg.RuntimeEnvironment = environment
+
+ return nil
+}
+
+func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error {
+ maxAttempts, found, err := getRetryMaxAttempts(ctx, configs)
+ if err != nil || !found {
+ return err
+ }
+ cfg.RetryMaxAttempts = maxAttempts
+
+ return nil
+}
+
+func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+ retryMode, found, err := getRetryMode(ctx, configs)
+ if err != nil || !found {
+ return err
+ }
+ cfg.RetryMode = retryMode
+
+ return nil
+}
+
+func resolveInterceptors(ctx context.Context, cfg *aws.Config, configs configs) error {
+	// LoadOptions is the only config source on which interceptors can be
+	// configured, so check for it directly.
+ for _, c := range configs {
+ if loadopts, ok := c.(LoadOptions); ok {
+ cfg.Interceptors = loadopts.Interceptors.Copy()
+ }
+ }
+ return nil
+}
+
+func resolveAuthSchemePreference(ctx context.Context, cfg *aws.Config, configs configs) error {
+ if pref, ok := getAuthSchemePreference(ctx, configs); ok {
+ cfg.AuthSchemePreference = pref
+ }
+ return nil
+}
+
+func resolveServiceOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+ serviceOptions, found, err := getServiceOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+
+ cfg.ServiceOptions = serviceOptions
+ return nil
+}
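
resolveCustomCABundle and resolveRetryer above act on values supplied through functional options defined elsewhere in this package. A caller-side sketch, assuming config.WithCustomCABundle and config.WithRetryer keep their documented signatures; the ca.pem path is hypothetical:

	package main

	import (
		"bytes"
		"context"
		"log"
		"os"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/aws/retry"
		"github.com/aws/aws-sdk-go-v2/config"
	)

	func main() {
		pem, err := os.ReadFile("ca.pem") // hypothetical CA bundle path
		if err != nil {
			log.Fatal(err)
		}
		cfg, err := config.LoadDefaultConfig(context.TODO(),
			// Consumed by resolveCustomCABundle: the PEM is appended to the
			// HTTP client's TLS RootCAs.
			config.WithCustomCABundle(bytes.NewReader(pem)),
			// Consumed by resolveRetryer: supplying a custom retryer skips
			// the separate RetryMaxAttempts/RetryMode resolution above.
			config.WithRetryer(func() aws.Retryer {
				return retry.AddWithMaxAttempts(retry.NewStandard(), 5)
			}),
		)
		if err != nil {
			log.Fatal(err)
		}
		_ = cfg
	}
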
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
new file mode 100644
index 000000000..a8ebb3c0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
@@ -0,0 +1,122 @@
+package config
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc"
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
+)
+
+// resolveBearerAuthToken extracts a token provider from the config sources.
+//
+// If an explicit bearer authentication token provider is not found the
+// resolver will fallback to resolving token provider via other config sources
+// such as SharedConfig.
+func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {
+ found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)
+ if found || err != nil {
+ return err
+ }
+
+ return resolveBearerAuthTokenProviderChain(ctx, cfg, configs)
+}
+
+// resolveBearerAuthTokenProvider extracts the first instance of
+// BearerAuthTokenProvider from the config sources.
+//
+// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure
+// the Token is only refreshed when needed. This also protects the
+// TokenProvider so it can be used concurrently.
+//
+// Config providers used:
+// * bearerAuthTokenProviderProvider
+func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+ tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs)
+ if !found || err != nil {
+ return false, err
+ }
+
+ cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+ ctx, configs, tokenProvider)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+ _, sharedConfig, _ := getAWSConfigSources(configs)
+
+ var provider smithybearer.TokenProvider
+
+ if sharedConfig.SSOSession != nil {
+ provider, err = resolveBearerAuthSSOTokenProvider(
+ ctx, cfg, sharedConfig.SSOSession, configs)
+ }
+
+ if err == nil && provider != nil {
+ cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+ ctx, configs, provider)
+ }
+
+ return err
+}
+
+func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) {
+ ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+ }
+
+ var optFns []func(*ssocreds.SSOTokenProviderOptions)
+ if found {
+ optFns = append(optFns, ssoTokenProviderOptionsFn)
+ }
+
+ cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err)
+ }
+
+ client := ssooidc.NewFromConfig(*cfg)
+ provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...)
+
+ return provider, nil
+}
+
+// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go
+// bearer/auth#TokenCache with the provided options if the provider is not
+// already a TokenCache.
+func wrapWithBearerAuthTokenCache(
+ ctx context.Context,
+ cfgs configs,
+ provider smithybearer.TokenProvider,
+ optFns ...func(*smithybearer.TokenCacheOptions),
+) (smithybearer.TokenProvider, error) {
+ _, ok := provider.(*smithybearer.TokenCache)
+ if ok {
+ return provider, nil
+ }
+
+ tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs)
+ if err != nil {
+ return nil, err
+ }
+
+ opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns))
+ opts = append(opts, func(o *smithybearer.TokenCacheOptions) {
+ o.RefreshBeforeExpires = 5 * time.Minute
+ o.RetrieveBearerTokenTimeout = 30 * time.Second
+ })
+ opts = append(opts, optFns...)
+ if optionsFound {
+ opts = append(opts, tokenCacheConfigOptions)
+ }
+
+ return smithybearer.NewTokenCache(provider, opts...), nil
+}
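
wrapWithBearerAuthTokenCache keeps bearer tokens from being re-fetched on every request. A standalone sketch of the same smithy-go cache mechanics, assuming the bearer package exposes StaticTokenProvider; the token value is invented for illustration:

	package main

	import (
		"context"
		"fmt"
		"time"

		smithybearer "github.com/aws/smithy-go/auth/bearer"
	)

	func main() {
		// A static provider; real code would mint or read a token instead.
		provider := smithybearer.StaticTokenProvider{
			Token: smithybearer.Token{Value: "example-token"}, // hypothetical
		}

		// Mirrors the defaults applied above before any user options.
		cached := smithybearer.NewTokenCache(provider, func(o *smithybearer.TokenCacheOptions) {
			o.RefreshBeforeExpires = 5 * time.Minute
			o.RetrieveBearerTokenTimeout = 30 * time.Second
		})

		tok, err := cached.RetrieveBearerToken(context.TODO())
		fmt.Println(tok.Value, err)
	}
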
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
new file mode 100644
index 000000000..b00259df0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
@@ -0,0 +1,627 @@
+package config
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/aws-sdk-go-v2/service/sso"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+const (
+ // valid credential source values
+ credSourceEc2Metadata = "Ec2InstanceMetadata"
+ credSourceEnvironment = "Environment"
+ credSourceECSContainer = "EcsContainer"
+ httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
+)
+
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+ 0xFD, 0, 0xE, 0xC2,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0x23,
+}
+
+var (
+ ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
+)
+
+// resolveCredentials extracts a credential provider from slice of config
+// sources.
+//
+// If an explicit credential provider is not found the resolver will fallback
+// to resolving credentials by extracting a credential provider from EnvConfig
+// and SharedConfig.
+func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+ found, err := resolveCredentialProvider(ctx, cfg, configs)
+ if found || err != nil {
+ return err
+ }
+
+ return resolveCredentialChain(ctx, cfg, configs)
+}
+
+// resolveCredentialProvider extracts the first instance of Credentials from the
+// config slices.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider so it can be used concurrently.
+//
+// Config providers used:
+// * credentialsProviderProvider
+func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+ credProvider, found, err := getCredentialsProvider(ctx, configs)
+ if !found || err != nil {
+ return false, err
+ }
+
+ cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+// resolveCredentialChain resolves a credential provider chain using EnvConfig
+// and SharedConfig if present in the slice of provided configs.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also allows the credential
+// provider to be used safely by concurrent callers.
+func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+ envConfig, sharedConfig, other := getAWSConfigSources(configs)
+
+ // When checking if a profile was specified programmatically we should only consider the "other"
+ // configuration sources that have been provided. This ensures we correctly honor the expected credential
+ // hierarchy.
+ _, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case sharedProfileSet:
+ ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+ case envConfig.Credentials.HasKeys():
+ ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars)
+ cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)}
+ case len(envConfig.WebIdentityTokenFilePath) > 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVarsSTSWebIDToken)
+ err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
+ default:
+ ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Wrap the resolved provider in a cache so the SDK will cache credentials.
+ cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (ctx2 context.Context, err error) {
+ switch {
+ case sharedConfig.Source != nil:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSourceProfile)
+ // Assume IAM role with credentials source from a different profile.
+ ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+
+ case sharedConfig.Credentials.HasKeys():
+ // Static Credentials from Shared Config/Credentials file.
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfile)
+ cfg.Credentials = credentials.StaticCredentialsProvider{
+ Value: sharedConfig.Credentials,
+ Source: getCredentialSources(ctx),
+ }
+
+ case len(sharedConfig.CredentialSource) != 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileNamedProvider)
+ ctx, err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+
+ case len(sharedConfig.WebIdentityTokenFile) != 0:
+ // Credentials from Assume Web Identity token require an IAM Role, and
+ // that role will be assumed. May be wrapped with another assume role
+ // via SourceProfile.
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSTSWebIDToken)
+ return ctx, assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+
+ case sharedConfig.hasSSOConfiguration():
+ if sharedConfig.hasLegacySSOConfiguration() {
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSOLegacy)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceSSOLegacy)
+ } else {
+ ctx = addCredentialSource(ctx, aws.CredentialSourceSSO)
+ }
+ if sharedConfig.SSOSession != nil {
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSO)
+ }
+ err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
+
+ case len(sharedConfig.CredentialProcess) != 0:
+ // Get credentials from CredentialProcess
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileProcess)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProcess)
+ err = processCredentials(ctx, cfg, sharedConfig, configs)
+
+ case len(envConfig.ContainerCredentialsRelativePath) != 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
+ err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+ case len(envConfig.ContainerCredentialsEndpoint) != 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
+ err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
+ default:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS)
+ err = resolveEC2RoleCredentials(ctx, cfg, configs)
+ }
+ if err != nil {
+ return ctx, err
+ }
+
+ if len(sharedConfig.RoleARN) > 0 {
+ return ctx, credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+ }
+
+ return ctx, nil
+}
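+
+// Illustrative shared-config sketch (comment only): the source_profile branch
+// above corresponds to a profile layout like the following, where "marketing"
+// assumes a role using credentials resolved from "base". All names and the
+// role ARN are placeholders.
+//
+//	[profile base]
+//	aws_access_key_id     = AKID
+//	aws_secret_access_key = SECRET
+//
+//	[profile marketing]
+//	role_arn       = arn:aws:iam::123456789012:role/marketing
+//	source_profile = base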
+
+func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+ if err := sharedConfig.validateSSOConfiguration(); err != nil {
+ return err
+ }
+
+ var options []func(*ssocreds.Options)
+ v, found, err := getSSOProviderOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ options = append(options, v)
+ }
+
+ cfgCopy := cfg.Copy()
+
+ options = append(options, func(o *ssocreds.Options) {
+ o.CredentialSources = getCredentialSources(ctx)
+ })
+
+ if sharedConfig.SSOSession != nil {
+ ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+ if err != nil {
+ return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+ }
+ var optFns []func(*ssocreds.SSOTokenProviderOptions)
+ if found {
+ optFns = append(optFns, ssoTokenProviderOptionsFn)
+ }
+ cfgCopy.Region = sharedConfig.SSOSession.SSORegion
+ cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name)
+ if err != nil {
+ return err
+ }
+ oidcClient := ssooidc.NewFromConfig(cfgCopy)
+ tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...)
+ options = append(options, func(o *ssocreds.Options) {
+ o.SSOTokenProvider = tokenProvider
+ o.CachedTokenFilepath = cachedPath
+ })
+ } else {
+ cfgCopy.Region = sharedConfig.SSORegion
+ }
+
+ cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)
+
+ return nil
+}
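+
+// Illustrative shared-config sketch (comment only): the SSOSession branch
+// above is driven by an sso-session section referenced from a profile; all
+// values below are placeholders.
+//
+//	[profile dev]
+//	sso_session    = my-sso
+//	sso_account_id = 123456789012
+//	sso_role_name  = ReadOnly
+//
+//	[sso-session my-sso]
+//	sso_region    = us-east-1
+//	sso_start_url = https://my-sso-portal.awsapps.com/start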
+
+func ecsContainerURI(path string) string {
+ return fmt.Sprintf("%s%s", ecsContainerEndpoint, path)
+}
+
+func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+ var opts []func(*processcreds.Options)
+
+ options, found, err := getProcessCredentialOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ opts = append(opts, options)
+ }
+
+ opts = append(opts, func(o *processcreds.Options) {
+ o.CredentialSources = getCredentialSources(ctx)
+ })
+
+ cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...)
+
+ return nil
+}
+
+// isAllowedHost returns whether host is a loopback address or a known ECS/EKS
+// container IP.
+//
+// host can either be an IP address or an unresolved hostname; resolution is
+// performed automatically in the latter case.
+func isAllowedHost(host string) (bool, error) {
+ if ip := net.ParseIP(host); ip != nil {
+ return isIPAllowed(ip), nil
+ }
+
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+
+ for _, addr := range addrs {
+ if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func isIPAllowed(ip net.IP) bool {
+ return ip.IsLoopback() ||
+ ip.Equal(ecsContainerIPv4) ||
+ ip.Equal(eksContainerIPv4) ||
+ ip.Equal(eksContainerIPv6)
+}
+
+func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
+ var resolveErr error
+
+ parsed, err := url.Parse(endpointURL)
+ if err != nil {
+ resolveErr = fmt.Errorf("invalid URL, %w", err)
+ } else {
+ host := parsed.Hostname()
+ if len(host) == 0 {
+ resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+ } else if parsed.Scheme == "http" {
+ if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+ resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr)
+ } else if !isAllowedHost {
+ resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host)
+ }
+ }
+ }
+
+ if resolveErr != nil {
+ return resolveErr
+ }
+
+ return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs)
+}
+
+func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error {
+ optFns := []func(*endpointcreds.Options){
+ func(options *endpointcreds.Options) {
+ if len(authToken) != 0 {
+ options.AuthorizationToken = authToken
+ }
+ if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+ options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+ var contents []byte
+ var err error
+ if contents, err = ioutil.ReadFile(authFilePath); err != nil {
+ return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+ }
+ return string(contents), nil
+ })
+ }
+ options.APIOptions = cfg.APIOptions
+ if cfg.Retryer != nil {
+ options.Retryer = cfg.Retryer()
+ }
+ options.CredentialSources = getCredentialSources(ctx)
+ },
+ }
+
+ optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ optFns = append(optFns, optFn)
+ }
+
+ provider := endpointcreds.New(url, optFns...)
+
+ cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
+ options.ExpiryWindow = 5 * time.Minute
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
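+
+// Illustrative environment sketch (comment only): this HTTP provider is
+// typically activated by the container credential environment variables,
+// e.g. on ECS. Paths are placeholders.
+//
+//	AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=/v2/credentials/uuid
+//	AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/path/to/token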
+
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (context.Context, error) {
+ switch sharedCfg.CredentialSource {
+ case credSourceEc2Metadata:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS)
+ return ctx, resolveEC2RoleCredentials(ctx, cfg, configs)
+
+ case credSourceEnvironment:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars)
+ cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)}
+
+ case credSourceECSContainer:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
+ if len(envConfig.ContainerCredentialsRelativePath) != 0 {
+ return ctx, resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+ }
+ if len(envConfig.ContainerCredentialsEndpoint) != 0 {
+ return ctx, resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+ }
+ return ctx, fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set")
+
+ default:
+ return ctx, fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+ }
+
+ return ctx, nil
+}
+
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+ optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+ optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ optFns = append(optFns, optFn)
+ }
+
+ optFns = append(optFns, func(o *ec2rolecreds.Options) {
+ // Only define a client from config if not already defined.
+ if o.Client == nil {
+ o.Client = imds.NewFromConfig(*cfg)
+ }
+ o.CredentialSources = getCredentialSources(ctx)
+ })
+
+ provider := ec2rolecreds.New(optFns...)
+
+ cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+ var (
+ envConfig *EnvConfig
+ sharedConfig *SharedConfig
+ other configs
+ )
+
+ for i := range cfgs {
+ switch c := cfgs[i].(type) {
+ case EnvConfig:
+ if envConfig == nil {
+ envConfig = &c
+ }
+ case *EnvConfig:
+ if envConfig == nil {
+ envConfig = c
+ }
+ case SharedConfig:
+ if sharedConfig == nil {
+ sharedConfig = &c
+ }
+ case *SharedConfig:
+ if sharedConfig == nil {
+ sharedConfig = c
+ }
+ default:
+ other = append(other, c)
+ }
+ }
+
+ if envConfig == nil {
+ envConfig = &EnvConfig{}
+ }
+
+ if sharedConfig == nil {
+ sharedConfig = &SharedConfig{}
+ }
+
+ return envConfig, sharedConfig, other
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session and the MFAToken option is not set, but the shared config is
+// configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Error is the error message
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
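+
+// Illustrative sketch (comment only): this error can be avoided by supplying
+// a token provider when the profile sets mfa_serial, e.g. via the exported
+// load option (assuming stscreds.StdinTokenProvider fits the use case):
+//
+//	cfg, err := config.LoadDefaultConfig(ctx,
+//		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
+//			o.TokenProvider = stscreds.StdinTokenProvider
+//		}))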
+
+func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error {
+ if len(filepath) == 0 {
+ return fmt.Errorf("token file path is not set")
+ }
+
+ optFns := []func(*stscreds.WebIdentityRoleOptions){
+ func(options *stscreds.WebIdentityRoleOptions) {
+ options.RoleSessionName = sessionName
+ },
+ }
+
+ optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if found {
+ optFns = append(optFns, optFn)
+ }
+
+ opts := stscreds.WebIdentityRoleOptions{
+ RoleARN: roleARN,
+ }
+
+ optFns = append(optFns, func(options *stscreds.WebIdentityRoleOptions) {
+ options.CredentialSources = getCredentialSources(ctx)
+ })
+
+ for _, fn := range optFns {
+ fn(&opts)
+ }
+
+ if len(opts.RoleARN) == 0 {
+ return fmt.Errorf("role ARN is not set")
+ }
+
+ client := opts.Client
+ if client == nil {
+ client = sts.NewFromConfig(*cfg)
+ }
+
+ provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
+
+ cfg.Credentials = provider
+
+ return nil
+}
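+
+// Illustrative environment sketch (comment only): the web identity path above
+// is commonly driven by EKS IRSA-style environment variables. Values are
+// placeholders.
+//
+//	AWS_ROLE_ARN=arn:aws:iam::123456789012:role/pod-role
+//	AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token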
+
+func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) {
+ // resolve credentials early
+ credentialSources := getCredentialSources(ctx)
+ optFns := []func(*stscreds.AssumeRoleOptions){
+ func(options *stscreds.AssumeRoleOptions) {
+ options.RoleSessionName = sharedCfg.RoleSessionName
+ if sharedCfg.RoleDurationSeconds != nil {
+ if *sharedCfg.RoleDurationSeconds/time.Minute > 15 {
+ options.Duration = *sharedCfg.RoleDurationSeconds
+ }
+ }
+ // Assume role with external ID
+ if len(sharedCfg.ExternalID) > 0 {
+ options.ExternalID = aws.String(sharedCfg.ExternalID)
+ }
+
+ // Assume role with MFA
+ if len(sharedCfg.MFASerial) != 0 {
+ options.SerialNumber = aws.String(sharedCfg.MFASerial)
+ }
+
+ // add existing credential chain
+ options.CredentialSources = credentialSources
+ },
+ }
+
+ optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs)
+ if err != nil {
+ return err
+ }
+ if found {
+ optFns = append(optFns, optFn)
+ }
+
+ {
+ // Synthesize options early to validate configuration errors sooner to ensure a token provider
+ // is present if the SerialNumber was set.
+ var o stscreds.AssumeRoleOptions
+ for _, fn := range optFns {
+ fn(&o)
+ }
+ if o.TokenProvider == nil && o.SerialNumber != nil {
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+ }
+ cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...)
+
+ return nil
+}
+
+// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache
+// with the provided options if the provider is not already a
+// aws.CredentialsCache.
+func wrapWithCredentialsCache(
+ ctx context.Context,
+ cfgs configs,
+ provider aws.CredentialsProvider,
+ optFns ...func(options *aws.CredentialsCacheOptions),
+) (aws.CredentialsProvider, error) {
+ _, ok := provider.(*aws.CredentialsCache)
+ if ok {
+ return provider, nil
+ }
+
+ credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
+ if err != nil {
+ return nil, err
+ }
+
+ // force allocation of a new slice if the additional options are
+ // needed, to prevent overwriting the passed in slice of options.
+ optFns = optFns[:len(optFns):len(optFns)]
+ if optionsFound {
+ optFns = append(optFns, credCacheOptions)
+ }
+
+ return aws.NewCredentialsCache(provider, optFns...), nil
+}
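+
+// Illustrative sketch (comment only): callers can influence the cache created
+// here via the exported load option (assuming config.WithCredentialsCacheOptions
+// is used at load time):
+//
+//	cfg, err := config.LoadDefaultConfig(ctx,
+//		config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
+//			o.ExpiryWindow = 10 * time.Minute
+//		}))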
+
+// credentialSource stores the chain of providers that was used to create an instance of
+// a credentials provider on the context
+type credentialSource struct{}
+
+func addCredentialSource(ctx context.Context, source aws.CredentialSource) context.Context {
+ existing, ok := ctx.Value(credentialSource{}).([]aws.CredentialSource)
+ if !ok {
+ existing = []aws.CredentialSource{source}
+ } else {
+ existing = append(existing, source)
+ }
+ return context.WithValue(ctx, credentialSource{}, existing)
+}
+
+func getCredentialSources(ctx context.Context) []aws.CredentialSource {
+ // Use the comma-ok form so a context with no recorded sources yields a
+ // nil slice instead of panicking on the type assertion.
+ sources, _ := ctx.Value(credentialSource{}).([]aws.CredentialSource)
+ return sources
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
new file mode 100644
index 000000000..97be3f756
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -0,0 +1,1696 @@
+package config
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ "github.com/aws/aws-sdk-go-v2/internal/ini"
+ "github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+ "github.com/aws/smithy-go/logging"
+ smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
+)
+
+const (
+ // Prefix to use for filtering profiles. The profile prefix should only
+ // exist in the shared config file, not the credentials file.
+ profilePrefix = `profile `
+
+ // Prefix to be used for SSO sections. These are supposed to only exist in
+ // the shared config file, not the credentials file.
+ ssoSectionPrefix = `sso-session `
+
+ // Prefix for services section. It is referenced in profile via the services
+ // parameter to configure clients for service-specific parameters.
+ servicesPrefix = `services `
+
+ // string equivalent for boolean
+ endpointDiscoveryDisabled = `false`
+ endpointDiscoveryEnabled = `true`
+ endpointDiscoveryAuto = `auto`
+
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+ roleDurationSecondsKey = "duration_seconds" // optional
+
+ // AWS Single Sign-On (AWS SSO) group
+ ssoSessionNameKey = "sso_session"
+
+ ssoRegionKey = "sso_region"
+ ssoStartURLKey = "sso_start_url"
+
+ ssoAccountIDKey = "sso_account_id"
+ ssoRoleNameKey = "sso_role_name"
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+ // External Credential process
+ credentialProcessKey = `credential_process` // optional
+
+ // Web Identity Token File
+ webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+ // S3 ARN Region Usage
+ s3UseARNRegionKey = "s3_use_arn_region"
+
+ ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+ ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+
+ ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
+ // Use DualStack Endpoint Resolution
+ useDualStackEndpoint = "use_dualstack_endpoint"
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+
+ // S3 Disable Multi-Region AccessPoints
+ s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
+
+ useFIPSEndpointKey = "use_fips_endpoint"
+
+ defaultsModeKey = "defaults_mode"
+
+ // Retry options
+ retryMaxAttemptsKey = "max_attempts"
+ retryModeKey = "retry_mode"
+
+ caBundleKey = "ca_bundle"
+
+ sdkAppID = "sdk_ua_app_id"
+
+ ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls"
+
+ endpointURL = "endpoint_url"
+
+ servicesSectionKey = "services"
+
+ disableRequestCompression = "disable_request_compression"
+ requestMinCompressionSizeBytes = "request_min_compression_size_bytes"
+
+ s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth"
+
+ accountIDKey = "aws_account_id"
+ accountIDEndpointMode = "account_id_endpoint_mode"
+
+ requestChecksumCalculationKey = "request_checksum_calculation"
+ responseChecksumValidationKey = "response_checksum_validation"
+ checksumWhenSupported = "when_supported"
+ checksumWhenRequired = "when_required"
+
+ authSchemePreferenceKey = "auth_scheme_preference"
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+ return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+ return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used, in order, to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+ DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that will be used, in order, to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+ DefaultSharedCredentialsFilename(),
+}
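+
+// Illustrative sketch (comment only): both default file lists can be replaced
+// at load time through the exported options; the paths are placeholders.
+//
+//	cfg, err := config.LoadDefaultConfig(ctx,
+//		config.WithSharedConfigFiles([]string{"/etc/app/aws_config"}),
+//		config.WithSharedCredentialsFiles([]string{"/etc/app/aws_credentials"}))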
+
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type SSOSession struct {
+ Name string
+ SSORegion string
+ SSOStartURL string
+}
+
+func (s *SSOSession) setFromIniSection(section ini.Section) {
+ updateString(&s.Name, section, ssoSessionNameKey)
+ updateString(&s.SSORegion, section, ssoRegionKey)
+ updateString(&s.SSOStartURL, section, ssoStartURLKey)
+}
+
+// Services contains values configured in the services section
+// of the AWS configuration file.
+type Services struct {
+ // Services section values
+ // {"serviceId": {"key": "value"}}
+ // e.g. {"s3": {"endpoint_url": "example.com"}}
+ ServiceValues map[string]map[string]string
+}
+
+func (s *Services) setFromIniSection(section ini.Section) {
+ if s.ServiceValues == nil {
+ s.ServiceValues = make(map[string]map[string]string)
+ }
+ for _, service := range section.List() {
+ s.ServiceValues[service] = section.Map(service)
+ }
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+ Profile string
+
+ // Credentials values from the config file. Both aws_access_key_id
+ // and aws_secret_access_key must be provided together in the same file
+ // to be considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of the
+ // other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Credentials aws.Credentials
+
+ CredentialSource string
+ CredentialProcess string
+ WebIdentityTokenFile string
+
+ // SSO session options
+ SSOSessionName string
+ SSOSession *SSOSession
+
+ // Legacy SSO session options
+ SSORegion string
+ SSOStartURL string
+
+ // SSO fields not used
+ SSOAccountID string
+ SSORoleName string
+
+ RoleARN string
+ ExternalID string
+ MFASerial string
+ RoleSessionName string
+ RoleDurationSeconds *time.Duration
+
+ SourceProfileName string
+ Source *SharedConfig
+
+ // Region is the region the SDK should use for looking up AWS service endpoints
+ // and signing requests.
+ //
+ // region = us-west-2
+ Region string
+
+ // EnableEndpointDiscovery can be enabled or disabled in the shared config
+ // by setting endpoint_discovery_enabled to true, or false respectively.
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // s3_use_arn_region=true
+ S3UseARNRegion *bool
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection
+ // mode (IPv4 or IPv6)
+ //
+ // ec2_metadata_service_endpoint_mode=IPv6
+ EC2IMDSEndpointMode imds.EndpointModeState
+
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If
+ // specified it overrides EC2IMDSEndpointMode.
+ //
+ // ec2_metadata_service_endpoint=http://fd00:ec2::254
+ EC2IMDSEndpoint string
+
+ // Specifies that IMDS clients should not fallback to IMDSv1 if token
+ // requests fail.
+ //
+ // ec2_metadata_v1_disabled=true
+ EC2IMDSv1Disabled *bool
+
+ // Specifies if the S3 service should disable support for Multi-Region
+ // access-points
+ //
+ // s3_disable_multiregion_access_points=true
+ S3DisableMultiRegionAccessPoints *bool
+
+ // Specifies that SDK clients must resolve a dual-stack endpoint for
+ // services.
+ //
+ // use_dualstack_endpoint=true
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Specifies that SDK clients must resolve a FIPS endpoint for
+ // services.
+ //
+ // use_fips_endpoint=true
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // Specifies which defaults mode should be used by services.
+ //
+ // defaults_mode=standard
+ DefaultsMode aws.DefaultsMode
+
+ // Specifies the maximum number attempts an API client will call an
+ // operation that fails with a retryable error.
+ //
+ // max_attempts=3
+ RetryMaxAttempts int
+
+ // Specifies the retry model the API client will be created with.
+ //
+ // retry_mode=standard
+ RetryMode aws.RetryMode
+
+ // Sets the path to a custom Certificate Authority (CA) bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle. Only use
+ // this if you want to configure the SDK to use a custom set of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's
+ // HTTP client. If the client's Transport is not a http.Transport an error
+ // will be returned. If the Transport's TLS config is set this option will
+ // cause the SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this
+ // setting. To use this option and custom HTTP client, the HTTP client
+ // needs to be provided when creating the config. Not the service client.
+ //
+ // ca_bundle=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ // aws sdk app ID that can be added to user agent header string
+ AppID string
+
+ // Flag used to disable configured endpoints.
+ IgnoreConfiguredEndpoints *bool
+
+ // Value to contain configured endpoints to be propagated to
+ // corresponding endpoint resolution field.
+ BaseEndpoint string
+
+ // Services section config.
+ ServicesSectionName string
+ Services Services
+
+ // Determines if request compression is disabled; defaults to false
+ // (compression allowed). Retrieved from the config file profile field
+ // disable_request_compression.
+ DisableRequestCompression *bool
+
+ // Inclusive threshold of request body size that triggers compression;
+ // defaults to 10240 and must be within 0 and 10485760 bytes inclusive.
+ // Retrieved from the config file profile field request_min_compression_size_bytes.
+ RequestMinCompressSizeBytes *int64
+
+ // Whether S3Express auth is disabled.
+ //
+ // This will NOT prevent requests from being made to S3Express buckets, it
+ // will only bypass the modified endpoint routing and signing behaviors
+ // associated with the feature.
+ S3DisableExpressAuth *bool
+
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // RequestChecksumCalculation indicates if the request checksum should be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // ResponseChecksumValidation indicates if the response checksum should be validated
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+}
+
+func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
+ if len(c.DefaultsMode) == 0 {
+ return "", false, nil
+ }
+
+ return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the maximum number of attempts an API client
+// created Retryer should attempt an operation call before failing.
+func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) {
+ if c.RetryMaxAttempts == 0 {
+ return 0, false, nil
+ }
+
+ return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the model the API client should create its Retryer in.
+func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) {
+ if len(c.RetryMode) == 0 {
+ return "", false, nil
+ }
+
+ return c.RetryMode, true, nil
+}
+
+// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
+// the client's requests are sent to.
+func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+ if c.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+
+ return *c.S3UseARNRegion, true, nil
+}
+
+// GetEnableEndpointDiscovery returns if endpoint_discovery_enabled is set.
+func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+ if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+ return aws.EndpointDiscoveryUnset, false, nil
+ }
+
+ return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region
+// access-points.
+func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+ if c.S3DisableMultiRegionAccessPoints == nil {
+ return false, false, nil
+ }
+
+ return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// getRegion returns the region for the profile if a region is set.
+func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) {
+ if len(c.Region) == 0 {
+ return "", false, nil
+ }
+ return c.Region, true, nil
+}
+
+// getCredentialsProvider returns the credentials for a profile if they were set.
+func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) {
+ return c.Credentials, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+ if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+ return imds.EndpointModeStateUnset, false, nil
+ }
+
+ return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+ if len(c.EC2IMDSEndpoint) == 0 {
+ return "", false, nil
+ }
+
+ return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+ if c.EC2IMDSv1Disabled == nil {
+ return false, false
+ }
+
+ return *c.EC2IMDSv1Disabled, true
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+ if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+ return aws.DualStackEndpointStateUnset, false, nil
+ }
+
+ return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+ if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+ return aws.FIPSEndpointStateUnset, false, nil
+ }
+
+ return c.UseFIPSEndpoint, true, nil
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [SharedConfig.S3DisableExpressAuth].
+func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) {
+ if c.S3DisableExpressAuth == nil {
+ return false, false
+ }
+
+ return *c.S3DisableExpressAuth, true
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file was set.
+func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+ if len(c.CustomCABundle) == 0 {
+ return nil, false, nil
+ }
+
+ b, err := ioutil.ReadFile(c.CustomCABundle)
+ if err != nil {
+ return nil, false, err
+ }
+ return bytes.NewReader(b), true, nil
+}
+
+// getAppID returns the sdk app ID if set in shared config profile
+func (c SharedConfig) getAppID(context.Context) (string, bool, error) {
+ return c.AppID, len(c.AppID) > 0, nil
+}
+
+// GetIgnoreConfiguredEndpoints reports whether the configured-endpoints
+// feature should be disabled.
+func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+ if c.IgnoreConfiguredEndpoints == nil {
+ return false, false, nil
+ }
+
+ return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+ return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint returns the configured endpoint URL, if any, for the
+// service identified by the given SDK ID (normalized before lookup).
+func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+ if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok {
+ if endpt, ok := service[endpointURL]; ok {
+ return endpt, true, nil
+ }
+ }
+ return "", false, nil
+}
+
+func normalizeShared(sdkID string) string {
+ lower := strings.ToLower(sdkID)
+ return strings.ReplaceAll(lower, " ", "_")
+}
+
+func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) {
+ return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+ cfg, err := loadSharedConfig(ctx, configs)
+ if err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistError); ok {
+ return SharedConfig{}, nil
+ }
+ return nil, err
+ }
+
+ return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file
+// The file names and profile name are sourced from the configs.
+//
+// If profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+ var profile string
+ var configFiles []string
+ var credentialsFiles []string
+ var ok bool
+ var err error
+
+ profile, ok, err = getSharedConfigProfile(ctx, configs)
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ profile = defaultSharedConfigProfile
+ }
+
+ configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+ if err != nil {
+ return nil, err
+ }
+
+ credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+ if err != nil {
+ return nil, err
+ }
+
+ // setup logger if log configuration warning is set
+ var logger logging.Logger
+ logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+ if found && logWarnings {
+ logger, found, err = getLogger(ctx, configs)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+ if !found {
+ logger = logging.NewStandardLogger(os.Stderr)
+ }
+ }
+
+ return LoadSharedConfigProfile(ctx, profile,
+ func(o *LoadSharedConfigOptions) {
+ o.Logger = logger
+ o.ConfigFiles = configFiles
+ o.CredentialsFiles = credentialsFiles
+ },
+ )
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+ // CredentialsFiles are the shared credentials files
+ CredentialsFiles []string
+
+ // ConfigFiles are the shared config files
+ ConfigFiles []string
+
+ // Logger is the logger used to log shared config behavior
+ Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// If config files are not set, the SDK will default to using a file at location `.aws/config` if present.
+// If credentials files are not set, the SDK will default to using a file at location `.aws/credentials` if present.
+// If the file lists are set to empty slices, no default files are used.
+//
+// You can read more about shared config and credentials file location at
+// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location
+func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) {
+ var option LoadSharedConfigOptions
+ for _, fn := range optFns {
+ fn(&option)
+ }
+
+ if option.ConfigFiles == nil {
+ option.ConfigFiles = DefaultSharedConfigFiles
+ }
+
+ if option.CredentialsFiles == nil {
+ option.CredentialsFiles = DefaultSharedCredentialsFiles
+ }
+
+ // load shared configuration sections from shared configuration INI options
+ configSections, err := loadIniFiles(option.ConfigFiles)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // check for profile prefix and drop duplicates or invalid profiles
+ err = processConfigSections(ctx, &configSections, option.Logger)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // load shared credentials sections from shared credentials INI options
+ credentialsSections, err := loadIniFiles(option.CredentialsFiles)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ // check for profile prefix and drop duplicates or invalid profiles
+ err = processCredentialsSections(ctx, &credentialsSections, option.Logger)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ err = mergeSections(&configSections, credentialsSections)
+ if err != nil {
+ return SharedConfig{}, err
+ }
+
+ cfg := SharedConfig{}
+ profiles := map[string]struct{}{}
+
+ if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil {
+ return SharedConfig{}, err
+ }
+
+ return cfg, nil
+}
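+
+// Illustrative usage sketch (comment only): loading a single profile directly
+// with an explicit config file list; the profile name and path are placeholders.
+//
+//	sc, err := config.LoadSharedConfigProfile(ctx, "prod",
+//		func(o *config.LoadSharedConfigOptions) {
+//			o.ConfigFiles = []string{"/etc/app/aws_config"}
+//		})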
+
+func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+ skipSections := map[string]struct{}{}
+
+ for _, section := range sections.List() {
+ if _, ok := skipSections[section]; ok {
+ continue
+ }
+
+ // drop sections from config file that do not have expected prefixes.
+ switch {
+ case strings.HasPrefix(section, profilePrefix):
+ // Rename sections to remove "profile " prefixing to match with
+ // credentials file. If default is already present, it will be
+ // dropped.
+ newName, err := renameProfileSection(section, sections, logger)
+ if err != nil {
+ return fmt.Errorf("failed to rename profile section, %w", err)
+ }
+ skipSections[newName] = struct{}{}
+
+ case strings.HasPrefix(section, ssoSectionPrefix):
+ case strings.HasPrefix(section, servicesPrefix):
+ case strings.EqualFold(section, "default"):
+ default:
+ // drop this section, as invalid profile name
+ sections.DeleteSection(section)
+
+ if logger != nil {
+ logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+
+ "For use within a shared configuration file, "+
+ "a non-default profile must have `profile ` "+
+ "prefixed to the profile name.",
+ section,
+ )
+ }
+ }
+ }
+ return nil
+}
+
+func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) {
+ v, ok := sections.GetSection(section)
+ if !ok {
+ return "", fmt.Errorf("error processing profiles within the shared configuration files")
+ }
+
+ // delete section with profile as prefix
+ sections.DeleteSection(section)
+
+ // set the value to non-prefixed name in sections.
+ section = strings.TrimPrefix(section, profilePrefix)
+ if sections.HasSection(section) {
+ oldSection, _ := sections.GetSection(section)
+ v.Logs = append(v.Logs,
+ fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
+ "overriding non-default profile from %s",
+ v.SourceFile, oldSection.SourceFile))
+ sections.DeleteSection(section)
+ }
+
+ // assign non-prefixed name to section
+ v.Name = section
+ sections.SetSection(section, v)
+
+ return section, nil
+}
+
+func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+ for _, section := range sections.List() {
+ // drop profiles with prefix for credential files
+ if strings.HasPrefix(section, profilePrefix) {
+ // drop this section, as invalid profile name
+ sections.DeleteSection(section)
+
+ if logger != nil {
+ logger.Logf(logging.Debug,
+ "The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+ "for the shared credentials file.\n",
+ section,
+ )
+ }
+ }
+ }
+ return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+ mergedSections := ini.NewSections()
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ var v *ini.UnableToReadFile
+ if ok := errors.As(err, &v); ok {
+ // Skip files which can't be opened and read for whatever reason.
+ // We treat such files as empty, and do not fall back to other locations.
+ continue
+ } else if err != nil {
+ return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ // mergeSections into mergedSections
+ err = mergeSections(&mergedSections, sections)
+ if err != nil {
+ return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+ }
+
+ return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst *ini.Sections, src ini.Sections) error {
+ for _, sectionName := range src.List() {
+ srcSection, _ := src.GetSection(sectionName)
+
+ if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+ (srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+ srcSection.Errors = append(srcSection.Errors,
+ fmt.Errorf("partial credentials found for profile %v", sectionName))
+ }
+
+ if !dst.HasSection(sectionName) {
+ dst.SetSection(sectionName, srcSection)
+ continue
+ }
+
+ // merge with destination srcSection
+ dstSection, _ := dst.GetSection(sectionName)
+
+ // errors should be overridden if any
+ dstSection.Errors = srcSection.Errors
+
+ // Access key id update
+ if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+ accessKey := srcSection.String(accessKeyIDKey)
+ secretKey := srcSection.String(secretAccessKey)
+
+ if dstSection.Has(accessKeyIDKey) {
+ dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
+ dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
+ }
+
+ // update access key
+ v, err := ini.NewStringValue(accessKey)
+ if err != nil {
+ return fmt.Errorf("error merging access key, %w", err)
+ }
+ dstSection.UpdateValue(accessKeyIDKey, v)
+
+ // update secret key
+ v, err = ini.NewStringValue(secretKey)
+ if err != nil {
+ return fmt.Errorf("error merging secret key, %w", err)
+ }
+ dstSection.UpdateValue(secretAccessKey, v)
+
+ // update session token
+ if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
+ return err
+ }
+
+ // update source file to reflect where the static creds came from
+ dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey])
+ dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
+ }
+
+ stringKeys := []string{
+ roleArnKey,
+ sourceProfileKey,
+ credentialSourceKey,
+ externalIDKey,
+ mfaSerialKey,
+ roleSessionNameKey,
+ regionKey,
+ enableEndpointDiscoveryKey,
+ credentialProcessKey,
+ webIdentityTokenFileKey,
+ s3UseARNRegionKey,
+ s3DisableMultiRegionAccessPointsKey,
+ ec2MetadataServiceEndpointModeKey,
+ ec2MetadataServiceEndpointKey,
+ ec2MetadataV1DisabledKey,
+ useDualStackEndpoint,
+ useFIPSEndpointKey,
+ defaultsModeKey,
+ retryModeKey,
+ caBundleKey,
+ roleDurationSecondsKey,
+ retryMaxAttemptsKey,
+
+ ssoSessionNameKey,
+ ssoAccountIDKey,
+ ssoRegionKey,
+ ssoRoleNameKey,
+ ssoStartURLKey,
+
+ authSchemePreferenceKey,
+ }
+ for i := range stringKeys {
+ if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
+ return err
+ }
+ }
+
+ // set srcSection on dst srcSection
+ *dst = dst.SetSection(sectionName, dstSection)
+ }
+
+ return nil
+}
+
+func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+ if srcSection.Has(key) {
+ srcValue := srcSection.String(key)
+ val, err := ini.NewStringValue(srcValue)
+ if err != nil {
+ return fmt.Errorf("error merging %s, %w", key, err)
+ }
+
+ if dstSection.Has(key) {
+ dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+ dstSection.SourceFile[key], srcSection.SourceFile[key]))
+ }
+
+ dstSection.UpdateValue(key, val)
+ dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+ }
+ return nil
+}
+
+func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
+ return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+ "with a %v value found in a duplicate profile defined at file %v. \n",
+ sectionName, key, dstSourceFile, key, srcSourceFile)
+}
+
+// Returns an error if all of the files fail to load. If at least one file is
+// successfully loaded and contains the profile, no error will be returned.
+func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
+ sections ini.Sections, logger logging.Logger) error {
+ c.Profile = profile
+
+ section, ok := sections.GetSection(profile)
+ if !ok {
+ return SharedConfigProfileNotExistError{
+ Profile: profile,
+ }
+ }
+
+ // if logs are appended to the section, log them
+ if section.Logs != nil && logger != nil {
+ for _, log := range section.Logs {
+ logger.Logf(logging.Debug, log)
+ }
+ }
+
+ // set config from the provided INI section
+ err := c.setFromIniSection(profile, section)
+ if err != nil {
+ return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
+ }
+
+ if _, ok := profiles[profile]; ok {
+ // if this is the second instance of the profile the Assume Role
+ // options must be cleared because they are only valid for the
+ // first reference of a profile. The self-linked instance of the
+ // profile only has credential provider options.
+ c.clearAssumeRoleOptions()
+ } else {
+ // First time a profile has been seen. Assert if the credential type
+ // requires a role ARN, the ARN is also set
+ if err := c.validateCredentialsConfig(profile); err != nil {
+ return err
+ }
+ }
+
+ // if not top level profile and has credentials, return with credentials.
+ if len(profiles) != 0 && c.Credentials.HasKeys() {
+ return nil
+ }
+
+ profiles[profile] = struct{}{}
+
+ // validate no colliding credentials type are present
+ if err := c.validateCredentialType(); err != nil {
+ return err
+ }
+
+ // Link source profiles for assume roles
+ if len(c.SourceProfileName) != 0 {
+ // Linked profile via source_profile ignore credential provider
+ // options, the source profile must provide the credentials.
+ c.clearCredentialOptions()
+
+ srcCfg := &SharedConfig{}
+ err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger)
+ if err != nil {
+ // SourceProfileName that doesn't exist is an error in configuration.
+ if _, ok := err.(SharedConfigProfileNotExistError); ok {
+ err = SharedConfigAssumeRoleError{
+ RoleARN: c.RoleARN,
+ Profile: c.SourceProfileName,
+ Err: err,
+ }
+ }
+ return err
+ }
+
+ if !srcCfg.hasCredentials() {
+ return SharedConfigAssumeRoleError{
+ RoleARN: c.RoleARN,
+ Profile: c.SourceProfileName,
+ }
+ }
+
+ c.Source = srcCfg
+ }
+
+ // If the profile contains an SSO session parameter, the session MUST exist
+ // as a section in the config file. Load the SSO session using the name
+ // provided. If the session section is not found or incomplete an error
+ // will be returned.
+ if c.hasSSOTokenProviderConfiguration() {
+ section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName))
+ if !ok {
+ return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName)
+ }
+ var ssoSession SSOSession
+ ssoSession.setFromIniSection(section)
+ ssoSession.Name = c.SSOSessionName
+ c.SSOSession = &ssoSession
+ }
+
+ if len(c.ServicesSectionName) > 0 {
+ if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok {
+ var svcs Services
+ svcs.setFromIniSection(section)
+ c.Services = svcs
+ }
+ }
+
+ return nil
+}
+
+// setFromIniSection loads the configuration from the profile section defined in
+// the provided INI file. A SharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key,
+// the aws_access_key_id will be ignored.
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+ if len(section.Name) == 0 {
+ sources := make([]string, 0)
+ for _, v := range section.SourceFile {
+ sources = append(sources, v)
+ }
+
+ return fmt.Errorf("parsing error : could not find profile section name after processing files: %v", sources)
+ }
+
+ if len(section.Errors) != 0 {
+ var errStatement string
+ for i, e := range section.Errors {
+ // accumulate every section error, not just the last one
+ errStatement += fmt.Sprintf("%d, %v\n", i+1, e.Error())
+ }
+ return fmt.Errorf("Error using profile: \n %v", errStatement)
+ }
+
+ // Assume Role
+ updateString(&c.RoleARN, section, roleArnKey)
+ updateString(&c.ExternalID, section, externalIDKey)
+ updateString(&c.MFASerial, section, mfaSerialKey)
+ updateString(&c.RoleSessionName, section, roleSessionNameKey)
+ updateString(&c.SourceProfileName, section, sourceProfileKey)
+ updateString(&c.CredentialSource, section, credentialSourceKey)
+ updateString(&c.Region, section, regionKey)
+
+ // AWS Single Sign-On (AWS SSO)
+ // SSO session options
+ updateString(&c.SSOSessionName, section, ssoSessionNameKey)
+
+ // Legacy SSO session options
+ updateString(&c.SSORegion, section, ssoRegionKey)
+ updateString(&c.SSOStartURL, section, ssoStartURLKey)
+
+ // SSO fields not used
+ updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+ updateString(&c.SSORoleName, section, ssoRoleNameKey)
+
+ // we're retaining a behavioral quirk with this field that existed before
+ // the removal of literal parsing for #2276:
+ // - if the key is missing, the config field will not be set
+ // - if the key is set to a non-numeric, the config field will be set to 0
+ if section.Has(roleDurationSecondsKey) {
+ if v, ok := section.Int(roleDurationSecondsKey); ok {
+ c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second)
+ } else {
+ c.RoleDurationSeconds = aws.Duration(time.Duration(0))
+ }
+ }
+
+ updateString(&c.CredentialProcess, section, credentialProcessKey)
+ updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+ updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+ updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+ updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+ updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey)
+
+ if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
+ }
+ updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+ updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
+
+ updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
+ updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
+
+ if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
+ }
+
+ if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
+ }
+ if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
+ }
+
+ updateString(&c.CustomCABundle, section, caBundleKey)
+
+ // user agent app ID added to request User-Agent header
+ updateString(&c.AppID, section, sdkAppID)
+
+ updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints)
+
+ updateString(&c.BaseEndpoint, section, endpointURL)
+
+ if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err)
+ }
+ if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err)
+ }
+
+ if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err)
+ }
+
+ if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err)
+ }
+ if err := updateResponseChecksumValidation(&c.ResponseChecksumValidation, section, responseChecksumValidationKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err)
+ }
+
+ // Shared Credentials
+ creds := aws.Credentials{
+ AccessKeyID: section.String(accessKeyIDKey),
+ SecretAccessKey: section.String(secretAccessKey),
+ SessionToken: section.String(sessionTokenKey),
+ Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+ AccountID: section.String(accountIDKey),
+ }
+
+ if creds.HasKeys() {
+ c.Credentials = creds
+ }
+
+ updateString(&c.ServicesSectionName, section, servicesSectionKey)
+
+ c.AuthSchemePreference = toAuthSchemePreferenceList(section.String(authSchemePreferenceKey))
+
+ return nil
+}
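+
+// For reference, a shared-config profile exercising several of the fields
+// parsed above might look like the following. This is an illustrative
+// sketch only; the key spellings follow the standard AWS shared-config
+// names and are not defined in this file.
+//
+//  [profile example]
+//  aws_access_key_id     = AKIDEXAMPLE
+//  aws_secret_access_key = SECRETEXAMPLE
+//  retry_mode            = standard
+//  max_attempts          = 3
+//  ca_bundle             = /path/to/bundle.pem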
+
+func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v, ok := sec.Int(key)
+ if !ok {
+ return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key))
+ }
+ if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes {
+ return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v)
+ }
+ *bytes = new(int64)
+ **bytes = v
+ return nil
+}
+
+func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch {
+ case v == "true":
+ *disable = new(bool)
+ **disable = true
+ case v == "false":
+ *disable = new(bool)
+ **disable = false
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v)
+ }
+ return nil
+}
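+
+// An illustrative profile snippet for the two compression settings parsed
+// above (assuming the standard AWS shared-config key spellings):
+//
+//  [profile compress-example]
+//  disable_request_compression        = false
+//  request_min_compression_size_bytes = 10240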
+
+func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch v {
+ case "preferred":
+ *m = aws.AccountIDEndpointModePreferred
+ case "required":
+ *m = aws.AccountIDEndpointModeRequired
+ case "disabled":
+ *m = aws.AccountIDEndpointModeDisabled
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v)
+ }
+
+ return nil
+}
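+
+// Illustrative profile snippet (key spelling assumed from the standard AWS
+// shared-config documentation): account_id_endpoint_mode = preferred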
+
+func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch strings.ToLower(v) {
+ case checksumWhenSupported:
+ *m = aws.RequestChecksumCalculationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.RequestChecksumCalculationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v)
+ }
+
+ return nil
+}
+
+func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch strings.ToLower(v) {
+ case checksumWhenSupported:
+ *m = aws.ResponseChecksumValidationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.ResponseChecksumValidationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v)
+ }
+
+ return nil
+}
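+
+// An illustrative profile snippet for the checksum settings parsed by the
+// two functions above (key spellings assumed from the standard AWS
+// shared-config documentation):
+//
+//  [profile checksum-example]
+//  request_checksum_calculation = when_supported
+//  response_checksum_validation = when_required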
+
+func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+ if c.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+ if c.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *c.DisableRequestCompression, true, nil
+}
+
+func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) {
+ return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
+}
+
+func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) {
+ return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil
+}
+
+func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) {
+ return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil
+}
+
+func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
+ if !section.Has(key) {
+ return nil
+ }
+ value := section.String(key)
+ if ok := mode.SetFromString(value); !ok {
+ return fmt.Errorf("invalid value: %s", value)
+ }
+ return nil
+}
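+
+// Illustrative profile snippet; defaults_mode accepts the values understood
+// by aws.DefaultsMode.SetFromString (for example legacy, standard, or auto):
+//
+//  [profile defaults-example]
+//  defaults_mode = standard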
+
+func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) {
+ if !section.Has(key) {
+ return nil
+ }
+ value := section.String(key)
+ if *mode, err = aws.ParseRetryMode(value); err != nil {
+ return err
+ }
+ return nil
+}
+
+func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error {
+ if !section.Has(key) {
+ return nil
+ }
+ value := section.String(key)
+ return endpointMode.SetFromString(value)
+}
+
+func (c *SharedConfig) validateCredentialsConfig(profile string) error {
+ if err := c.validateCredentialsRequireARN(profile); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateCredentialsRequireARN(profile string) error {
+ var credSource string
+
+ switch {
+ case len(c.SourceProfileName) != 0:
+ credSource = sourceProfileKey
+ case len(c.CredentialSource) != 0:
+ credSource = credentialSourceKey
+ case len(c.WebIdentityTokenFile) != 0:
+ credSource = webIdentityTokenFileKey
+ }
+
+ if len(credSource) != 0 && len(c.RoleARN) == 0 {
+ return CredentialRequiresARNError{
+ Type: credSource,
+ Profile: profile,
+ }
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateCredentialType() error {
+ // Only one or no credential type can be defined.
+ if !oneOrNone(
+ len(c.SourceProfileName) != 0,
+ len(c.CredentialSource) != 0,
+ len(c.CredentialProcess) != 0,
+ len(c.WebIdentityTokenFile) != 0,
+ ) {
+ return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token")
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateSSOConfiguration() error {
+ if c.hasSSOTokenProviderConfiguration() {
+ err := c.validateSSOTokenProviderConfiguration()
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if c.hasLegacySSOConfiguration() {
+ err := c.validateLegacySSOConfiguration()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *SharedConfig) validateSSOTokenProviderConfiguration() error {
+ var missing []string
+
+ if len(c.SSOSessionName) == 0 {
+ missing = append(missing, ssoSessionNameKey)
+ }
+
+ if c.SSOSession == nil {
+ missing = append(missing, ssoSectionPrefix)
+ } else {
+ if len(c.SSOSession.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(c.SSOSession.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURLKey)
+ }
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ c.Profile, strings.Join(missing, ", "))
+ }
+
+ if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
+ return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
+ }
+
+ if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
+ return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix)
+ }
+
+ return nil
+}
+
+func (c *SharedConfig) validateLegacySSOConfiguration() error {
+ var missing []string
+
+ if len(c.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(c.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURLKey)
+ }
+
+ if len(c.SSOAccountID) == 0 {
+ missing = append(missing, ssoAccountIDKey)
+ }
+
+ if len(c.SSORoleName) == 0 {
+ missing = append(missing, ssoRoleNameKey)
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ c.Profile, strings.Join(missing, ", "))
+ }
+ return nil
+}
+
+func (c *SharedConfig) hasCredentials() bool {
+ switch {
+ case len(c.SourceProfileName) != 0:
+ case len(c.CredentialSource) != 0:
+ case len(c.CredentialProcess) != 0:
+ case len(c.WebIdentityTokenFile) != 0:
+ case c.hasSSOConfiguration():
+ case c.Credentials.HasKeys():
+ default:
+ return false
+ }
+
+ return true
+}
+
+func (c *SharedConfig) hasSSOConfiguration() bool {
+ return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration()
+}
+
+func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool {
+ return len(c.SSOSessionName) > 0
+}
+
+func (c *SharedConfig) hasLegacySSOConfiguration() bool {
+ return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
+}
+
+func (c *SharedConfig) clearAssumeRoleOptions() {
+ c.RoleARN = ""
+ c.ExternalID = ""
+ c.MFASerial = ""
+ c.RoleSessionName = ""
+ c.SourceProfileName = ""
+}
+
+func (c *SharedConfig) clearCredentialOptions() {
+ c.CredentialSource = ""
+ c.CredentialProcess = ""
+ c.WebIdentityTokenFile = ""
+ c.Credentials = aws.Credentials{}
+ c.SSOAccountID = ""
+ c.SSORegion = ""
+ c.SSORoleName = ""
+ c.SSOStartURL = ""
+}
+
+// SharedConfigLoadError is an error returned when the shared config file
+// fails to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+ return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+ return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistError struct {
+ Filename []string
+ Profile string
+ Err error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+ return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+ return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ Profile string
+ RoleARN string
+ Err error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+ return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+ return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+ e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+ // Type of credentials that were configured.
+ Type string
+
+ // Profile name the credentials were in.
+ Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+ return fmt.Sprintf(
+ "credential type %s requires role_arn, profile %s",
+ e.Type, e.Profile,
+ )
+}
+
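+// oneOrNone reports whether at most one of the passed booleans is true.
+// For example, oneOrNone() and oneOrNone(true, false) return true, while
+// oneOrNone(true, true) returns false.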
+func oneOrNone(bs ...bool) bool {
+ var count int
+
+ for _, b := range bs {
+ if b {
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// updateString will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.String(key)
+}
+
+// updateInt will only update the dst with the value in the section key,
+// if the key is present in the section.
+//
+// Down casts the INI integer value from an int64 to an int, which could be
+// a different bit size depending on the platform.
+func updateInt(dst *int, section ini.Section, key string) error {
+ if !section.Has(key) {
+ return nil
+ }
+
+ v, ok := section.Int(key)
+ if !ok {
+ return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key))
+ }
+
+ *dst = int(v)
+ return nil
+}
+
+// updateBool will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ v, _ := section.Bool(key)
+ *dst = v
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ v, _ := section.Bool(key)
+ *dst = new(bool)
+ **dst = v
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+
+ value := section.String(key)
+ if len(value) == 0 {
+ return
+ }
+
+ switch {
+ case strings.EqualFold(value, endpointDiscoveryDisabled):
+ *dst = aws.EndpointDiscoveryDisabled
+ case strings.EqualFold(value, endpointDiscoveryEnabled):
+ *dst = aws.EndpointDiscoveryEnabled
+ case strings.EqualFold(value, endpointDiscoveryAuto):
+ *dst = aws.EndpointDiscoveryAuto
+ }
+}
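+
+// Illustrative profile snippet (key spelling assumed from the standard AWS
+// shared-config documentation); valid values are true, false, and auto:
+//
+//  [profile discovery-example]
+//  endpoint_discovery_enabled = auto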
+
+// updateUseDualStackEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ if v, _ := section.Bool(key); v {
+ *dst = aws.DualStackEndpointStateEnabled
+ } else {
+ *dst = aws.DualStackEndpointStateDisabled
+ }
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding FIPSEndpointState is found.
+func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ if v, _ := section.Bool(key); v {
+ *dst = aws.FIPSEndpointStateEnabled
+ } else {
+ *dst = aws.FIPSEndpointStateDisabled
+ }
+}
+
+func (c SharedConfig) getAuthSchemePreference() ([]string, bool) {
+ if len(c.AuthSchemePreference) > 0 {
+ return c.AuthSchemePreference, true
+ }
+ return nil, false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
new file mode 100644
index 000000000..463dce685
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -0,0 +1,865 @@
+# v1.18.21 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.18.20 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.19 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.18 (2025-10-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.17 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.16 (2025-09-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.15 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2025-09-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2025-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2025-08-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2025-08-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2025-08-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2025-07-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.71 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.70 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.69 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.68 (2025-06-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.67 (2025-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.66 (2025-04-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.65 (2025-03-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.64 (2025-03-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.63 (2025-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.62 (2025-03-04.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.61 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.60 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.59 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.58 (2025-02-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.57 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.56 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.55 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.17.54 (2025-01-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.53 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.52 (2025-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.51 (2025-01-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.50 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.49 (2025-01-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.48 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.47 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.46 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.45 (2024-11-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.44 (2024-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.43 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.42 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.41 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.40 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.39 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.38 (2024-10-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.37 (2024-09-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.36 (2024-09-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.35 (2024-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.34 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.33 (2024-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.32 (2024-09-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.31 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.30 (2024-08-26)
+
+* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility.
+
+# v1.17.29 (2024-08-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.28 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.27 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.26 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.25 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.24 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.23 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.22 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.21 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.20 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.19 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.18 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.17 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2023-12-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2023-11-21)
+
+* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider.
+
+# v1.16.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2023-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-11-14)
+
+* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
+
+# v1.15.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-11-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.43 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.42 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.41 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.40 (2023-09-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.39 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.38 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.37 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.36 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.31 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.30 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.29 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.28 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.27 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.26 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.25 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.24 (2023-05-09)
+
+* No change notes available for this release.
+
+# v1.13.23 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.22 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.21 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.20 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.19 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.18 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2023-02-01)
+
+* No change notes available for this release.
+
+# v1.13.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and credentials in finding the home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+
+# v1.12.24 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-04-25)
+
+* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-23)
+
+* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache's new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-02-24)
+
+* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-09-10)
+
+* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders.
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
new file mode 100644
index 000000000..f6e2873ab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credential sources.
+*/
+package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
new file mode 100644
index 000000000..6ed71b42b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe for concurrent use, and does not cache the
+// credentials it retrieves. Wrap the Provider with an `aws.CredentialsCache`
+// to get concurrency safety and credential caching.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider in the credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credential resolution to use the EC2 Instance
+// role for credentials, specify a `credential_source` property in the config
+// profile the SDK will load.
+//
+// [default]
+// credential_source = Ec2InstanceMetadata
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+// provider := ec2rolecreds.New()
+//
+// // Create the service client value configured for credentials.
+// svc := s3.New(s3.Options{
+// Credentials: aws.NewCredentialsCache(provider),
+// })
+//
+// If you need more control, you can configure the EC2 IMDS API client used to
+// retrieve credentials through the ec2rolecreds.Options type.
+//
+// provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//    // See imds.Options type's documentation for more options available.
+//    o.Client = imds.New(imds.Options{
+//        HTTPClient: customHTTPClient,
+//    })
+// })
+//
+// # EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
+package ec2rolecreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
new file mode 100644
index 000000000..a95e6c8bd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -0,0 +1,241 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName provides the name of the EC2 role credentials provider.
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+ GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials are expired.
+//
+// The New function must be used to create the Provider, optionally with a
+// custom EC2 IMDS client.
+//
+// p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//    o.Client = imds.New(imds.Options{/* custom options */})
+// })
+type Provider struct {
+ options Options
+}
+
+// Options is a list of user settable options for setting the behavior of the Provider.
+type Options struct {
+ // The API client that will be used by the provider to make GetMetadata API
+ // calls to EC2 IMDS.
+ //
+ // If nil, the provider will default to the EC2 IMDS client.
+ Client GetMetadataAPIClient
+
+ // The chain of providers that was used to create this provider.
+ // These values are for reporting purposes and are not meant to be set directly.
+ CredentialSources []aws.CredentialSource
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+ options := Options{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.Client == nil {
+ options.Client = imds.New(imds.Options{})
+ }
+
+ return &Provider{
+ options: options,
+ }
+}
+
+// Retrieve retrieves credentials from the EC2 service. An error will be
+// returned if the request fails, or if the desired credentials cannot be
+// extracted.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ credsList, err := requestCredList(ctx, p.options.Client)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return aws.Credentials{Source: ProviderName},
+ fmt.Errorf("unexpected empty EC2 IMDS role list")
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ creds := aws.Credentials{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ Source: ProviderName,
+
+ CanExpire: true,
+ Expires: roleCreds.Expiration,
+ }
+
+ // Cap role credentials Expires to 1 hour so they can be refreshed more
+ // often. Jitter will be applied by the credentials cache, if one is used.
+ if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
+ creds.Expires = anHour
+ }
+
+ return creds, nil
+}
+
+// HandleFailToRefresh will extend the credentials Expires time if it is
+// expired. If the credentials will not expire within the minimum time, they
+// will be returned.
+//
+// If the credentials cannot expire, the original error will be returned.
+func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
+ aws.Credentials, error,
+) {
+ if !prevCreds.CanExpire {
+ return aws.Credentials{}, err
+ }
+
+ if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
+ return prevCreds, nil
+ }
+
+ newCreds := prevCreds
+ randFloat64, err := sdkrand.CryptoRandFloat64()
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
+ }
+
+ // Random distribution of [5,15) minutes.
+ expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
+ newCreds.Expires = sdk.NowTime().Add(expireOffset)
+
+ logger := middleware.GetLogger(ctx)
+ logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
+
+ return newCreds, nil
+}
+
+// AdjustExpiresBy will add the passed in duration to the passed in
+// credential's Expires time, unless the time until Expires is less than 15
+// minutes. Returns the credentials, even if not updated.
+func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
+ aws.Credentials, error,
+) {
+ if !creds.CanExpire {
+ return creds, nil
+ }
+ if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
+ return creds, nil
+ }
+
+ creds.Expires = creds.Expires.Add(dur)
+ return creds, nil
+}
+
+// ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. If
+// there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+ resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+ Path: iamSecurityCredsPath,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+ }
+ defer resp.Content.Close()
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Content)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests a specific set of credentials by name from the EC2
+// service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+ Path: path.Join(iamSecurityCredsPath, credsName),
+ })
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+ credsName, err)
+ }
+ defer resp.Content.Close()
+
+ var respCreds ec2RoleCredRespBody
+ if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+ credsName, err)
+ }
+
+ if !strings.EqualFold(respCreds.Code, "Success") {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{},
+ fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+ credsName,
+ &smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+ }
+
+ return respCreds, nil
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceIMDS}
+ } // If no source has been set, assume the provider is being used directly.
+ return p.options.CredentialSources
+}
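For orientation, a minimal consumer sketch (not part of the vendored file): wrapping the provider above in aws.NewCredentialsCache supplies the concurrency safety and refresh jitter the comments above refer to. This assumes the process runs on an EC2 instance with an attached instance profile.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
)

func main() {
	// The cache serializes refreshes; because this provider implements the
	// cache's fail-to-refresh and adjust-expiry hooks, the cache is expected
	// to invoke HandleFailToRefresh and AdjustExpiresBy automatically.
	cache := aws.NewCredentialsCache(ec2rolecreds.New())

	creds, err := cache.Retrieve(context.TODO())
	if err != nil {
		log.Fatalf("retrieve IMDS role credentials: %v", err)
	}
	fmt.Printf("source=%s expires=%v\n", creds.Source, creds.Expires)
}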
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
new file mode 100644
index 000000000..c3f5dadce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
new file mode 100644
index 000000000..dc291c97c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -0,0 +1,165 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/smithy-go"
+ smithymiddleware "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifier
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the set of configurable options for the endpoint client
+type Options struct {
+ // The endpoint to retrieve credentials from
+ Endpoint string
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // Set of options to modify how the credentials operation is invoked.
+ APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the Options, deep copying the APIOptions slice.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ return to
+}
+
+// Client is a client for retrieving AWS credentials from an endpoint
+type Client struct {
+ options Options
+}
+
+// New constructs a new Client from the given options
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ if options.HTTPClient == nil {
+ options.HTTPClient = awshttp.NewBuildableClient()
+ }
+
+ if options.Retryer == nil {
+ // Amazon-owned implementations of this endpoint are known to sometimes
+ // return plaintext responses (i.e. no Code). In addition to the normal
+ // retryable conditions, treat a few extra status codes as retryable.
+ options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) {
+ o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{
+ Codes: map[int]struct{}{
+ http.StatusTooManyRequests: {},
+ },
+ })
+ })
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ return client
+}
+
+// GetCredentialsInput is the input to send to the endpoint service to receive credentials.
+type GetCredentialsInput struct {
+ AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from credential endpoint
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+ stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+ stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+ stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+ addProtocolFinalizerMiddlewares(stack, options, "GetCredentials")
+ retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+ middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+ smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+ smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, err
+ }
+ }
+
+ handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, _, err := handler.Handle(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint
+type GetCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+ AccountID string
+}
+
+// EndpointError is an error returned from the endpoint service
+type EndpointError struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Fault smithy.ErrorFault `json:"-"`
+ statusCode int `json:"-"`
+}
+
+// Error is the error message string
+func (e *EndpointError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint
+func (e *EndpointError) ErrorCode() string {
+ return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint
+func (e *EndpointError) ErrorMessage() string {
+ return e.Message
+}
+
+// ErrorFault indicates error fault classification
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+ return e.Fault
+}
+
+// HTTPStatusCode implements retry.HTTPStatusCode.
+func (e *EndpointError) HTTPStatusCode() int {
+ return e.statusCode
+}
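As an aside for readers of this vendored code: since this package lives under internal/, application code cannot import it directly. The sketch below instead mirrors GetCredentialsOutput with a local struct (an assumption for illustration only) to show what a successful JSON response from a credentials endpoint decodes into; encoding/json matches the documented "AccessKeyId" key to the AccessKeyID field case-insensitively, which is why no struct tags are needed.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local stand-in for client.GetCredentialsOutput, illustration only.
type getCredentialsOutput struct {
	Expiration      *time.Time
	AccessKeyID     string
	SecretAccessKey string
	Token           string
	AccountID       string
}

func main() {
	// Response shape documented in endpointcreds/provider.go below.
	body := []byte(`{
	  "AccessKeyId": "MUA...",
	  "SecretAccessKey": "/7PC5om....",
	  "Token": "AQoDY....=",
	  "Expiration": "2016-02-25T06:03:31Z"
	}`)

	var out getCredentialsOutput
	if err := json.Unmarshal(body, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.AccessKeyID, out.Expiration.Format(time.RFC3339))
}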
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
new file mode 100644
index 000000000..748ee6724
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
new file mode 100644
index 000000000..f2820d20e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -0,0 +1,164 @@
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+
+ "github.com/aws/smithy-go"
+ smithymiddleware "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type buildEndpoint struct {
+ Endpoint string
+}
+
+func (b *buildEndpoint) ID() string {
+ return "BuildEndpoint"
+}
+
+func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) (
+ out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport, %T", in.Request)
+ }
+
+ if len(b.Endpoint) == 0 {
+ return out, metadata, fmt.Errorf("endpoint not provided")
+ }
+
+ parsed, err := url.Parse(b.Endpoint)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err)
+ }
+
+ request.URL = parsed
+
+ return next.HandleBuild(ctx, in)
+}
+
+type serializeOpGetCredential struct{}
+
+func (s *serializeOpGetCredential) ID() string {
+ return "OperationSerializer"
+}
+
+func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) (
+ out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request)
+ }
+
+ params, ok := in.Parameters.(*GetCredentialsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters)
+ }
+
+ const acceptHeader = "Accept"
+ request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json")
+
+ if len(params.AuthorizationToken) > 0 {
+ const authHeader = "Authorization"
+ request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type deserializeOpGetCredential struct{}
+
+func (d *deserializeOpGetCredential) ID() string {
+ return "OperationDeserializer"
+}
+
+func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) (
+ out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, deserializeError(response)
+ }
+
+ var shape *GetCredentialsOutput
+ if err = json.NewDecoder(response.Body).Decode(&shape); err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)}
+ }
+
+ out.Result = shape
+ return out, metadata, err
+}
+
+func deserializeError(response *smithyhttp.Response) error {
+ // we could be talking to anything, json isn't guaranteed
+ // see https://github.com/aws/aws-sdk-go-v2/issues/2316
+ if response.Header.Get("Content-Type") == "application/json" {
+ return deserializeJSONError(response)
+ }
+
+ msg, err := io.ReadAll(response.Body)
+ if err != nil {
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("read response, %w", err),
+ }
+ }
+
+ return &EndpointError{
+ // no sensible value for Code
+ Message: string(msg),
+ Fault: stof(response.StatusCode),
+ statusCode: response.StatusCode,
+ }
+}
+
+func deserializeJSONError(response *smithyhttp.Response) error {
+ var errShape *EndpointError
+ if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil {
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode error message, %w", err),
+ }
+ }
+
+ errShape.Fault = stof(response.StatusCode)
+ errShape.statusCode = response.StatusCode
+ return errShape
+}
+
+// maps HTTP status code to smithy ErrorFault
+func stof(code int) smithy.ErrorFault {
+ if code >= 500 {
+ return smithy.FaultServer
+ }
+ return smithy.FaultClient
+}
+
+func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
new file mode 100644
index 000000000..c8ac6d9ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
@@ -0,0 +1,207 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+type getCredentialsAPIClient interface {
+ GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error)
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ // The AWS Client to make HTTP requests to the endpoint with. The endpoint
+ // the request will be made to is provided by the aws.Config's
+ // EndpointResolver.
+ client getCredentialsAPIClient
+
+ options Options
+}
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the structure of configurable options for the Provider
+type Options struct {
+ // Endpoint to retrieve credentials from. Required
+ Endpoint string
+
+ // HTTPClient to handle sending HTTP requests to the target endpoint.
+ HTTPClient HTTPClient
+
+ // Set of options to modify how the credentials operation is invoked.
+ APIOptions []func(*middleware.Stack) error
+
+ // The Retryer to be used for determining whether a failed request should be retried
+ Retryer aws.Retryer
+
+ // Optional authorization token value if set will be used as the value of
+ // the Authorization header of the endpoint credential request.
+ //
+ // When constructed from environment, the provider will use the value of
+ // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
+ //
+ // Will be overridden if AuthorizationTokenProvider is configured
+ AuthorizationToken string
+
+ // Optional auth provider func to dynamically load the auth token from a file
+ // every time a credential is retrieved
+ //
+ // When constructed from environment, the provider will read and use the content
+ // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
+ // as the auth token every time credentials are retrieved
+ //
+ // Will override AuthorizationToken if configured
+ AuthorizationTokenProvider AuthTokenProvider
+
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
+}
+
+// AuthTokenProvider defines an interface to dynamically load a value to be passed
+// for the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+ GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing AuthTokenProvider interface
+// and enables customizing token provider behavior
+type TokenProviderFunc func() (string, error)
+
+// GetToken func retrieves auth token according to TokenProviderFunc implementation
+func (p TokenProviderFunc) GetToken() (string, error) {
+ return p()
+}
+
+// New returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func New(endpoint string, optFns ...func(*Options)) *Provider {
+ o := Options{
+ Endpoint: endpoint,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ p := &Provider{
+ client: client.New(client.Options{
+ HTTPClient: o.HTTPClient,
+ Endpoint: o.Endpoint,
+ APIOptions: o.APIOptions,
+ Retryer: o.Retryer,
+ }),
+ options: o,
+ }
+
+ return p
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the
+// Provider was configured for. An error will be returned if the retrieval
+// fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ resp, err := p.getCredentials(ctx)
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+ }
+
+ creds := aws.Credentials{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ Source: ProviderName,
+ AccountID: resp.AccountID,
+ }
+
+ if resp.Expiration != nil {
+ creds.CanExpire = true
+ creds.Expires = *resp.Expiration
+ }
+
+ return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+ authToken, err := p.resolveAuthToken()
+ if err != nil {
+ return nil, fmt.Errorf("resolve auth token: %v", err)
+ }
+
+ return p.client.GetCredentials(ctx, &client.GetCredentialsInput{
+ AuthorizationToken: authToken,
+ })
+}
+
+func (p *Provider) resolveAuthToken() (string, error) {
+ authToken := p.options.AuthorizationToken
+
+ var err error
+ if p.options.AuthorizationTokenProvider != nil {
+ authToken, err = p.options.AuthorizationTokenProvider.GetToken()
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if strings.ContainsAny(authToken, "\r\n") {
+ return "", fmt.Errorf("authorization token contains invalid newline sequence")
+ }
+
+ return authToken, nil
+}
+
+var _ aws.CredentialProviderSource = (*Provider)(nil)
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceHTTP}
+ }
+ return p.options.CredentialSources
+}
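A hedged usage sketch (endpoint URL and token file path are made-up placeholders): constructing the provider with an AuthorizationTokenProvider that re-reads a token file on every retrieval, mirroring the AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE behavior described above. Trimming the trailing newline matters because resolveAuthToken rejects tokens containing newline characters.

package main

import (
	"context"
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
)

func main() {
	provider := endpointcreds.New("http://127.0.0.1:8080/credentials",
		func(o *endpointcreds.Options) {
			o.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(
				func() (string, error) {
					// Hypothetical token file path.
					b, err := os.ReadFile("/var/run/creds-auth-token")
					if err != nil {
						return "", err
					}
					return strings.TrimSpace(string(b)), nil
				})
		})

	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
	if err != nil {
		log.Fatalf("endpoint credentials: %v", err)
	}
	log.Printf("loaded credentials from %s", creds.Source)
}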
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
new file mode 100644
index 000000000..89e632cc2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package credentials
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.18.21"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
new file mode 100644
index 000000000..a3137b8fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from
+// an external CLI invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with a
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDKs AWS Config
+//
+// You can use credentials from an AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to setup your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+// [default]
+// credential_process = /command/to/call
+//
+// Loading configuration using the config module will use the credential
+// process to retrieve credentials. NOTE: If there are credentials in the
+// profile you are using, the credential process will not be used.
+//
+// // Initialize a session to load credentials.
+// cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+// // Create S3 service client to use the credentials.
+// svc := s3.NewFromConfig(cfg)
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and providing it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+// // Create credentials using the Provider.
+// provider := processcreds.NewProvider("/path/to/command")
+//
+// // Create the service client value configured for credentials.
+// svc := s3.New(s3.Options{
+// Credentials: aws.NewCredentialsCache(provider),
+// })
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+// provider := processcreds.NewProvider("/path/to/command",
+// func(o *processcreds.Options) {
+// // Override the provider's default timeout
+// o.Timeout = 2 * time.Minute
+// })
+//
+// You can also use your own `exec.Cmd` value by providing a value that
+// satisfies the `NewCommandBuilder` interface to the `NewProviderCommand`
+// constructor.
+//
+// // Create an exec.Cmd
+// cmdBuilder := processcreds.NewCommandBuilderFunc(
+// func(ctx context.Context) (*exec.Cmd, error) {
+// cmd := exec.CommandContext(ctx,
+// "customCLICommand",
+// "-a", "argument",
+// )
+// cmd.Env = []string{
+// "ENV_VAR_FOO=value",
+// "ENV_VAR_BAR=other_value",
+// }
+//
+// return cmd, nil
+// },
+// )
+//
+// // Create credentials using your exec.Cmd and custom timeout
+// provider := processcreds.NewProviderCommand(cmdBuilder,
+// func(opt *processcreds.Provider) {
+// // optionally override the provider's default timeout
+// opt.Timeout = 1 * time.Second
+// })
+package processcreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
new file mode 100644
index 000000000..dfc6b2548
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -0,0 +1,296 @@
+package processcreds
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "runtime"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider
+type ProviderError struct {
+ Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+ return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+ return e.Err
+}
+
+// Provider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+ // Provides a constructor for exec.Cmd that are invoked by the provider for
+ // retrieving credentials. Use this to provide custom creation of exec.Cmd
+ // with things like environment variables, or other configuration.
+ //
+ // The provider defaults to the DefaultNewCommand function.
+ commandBuilder NewCommandBuilder
+
+ options Options
+}
+
+// Options is the configuration options for configuring the Provider.
+type Options struct {
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
+}
+
+// NewCommandBuilder provides the interface for specifying how the command the
+// Provider uses to retrieve credentials will be created.
+type NewCommandBuilder interface {
+ NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+ return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+ Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized with the current process environment
+// variables, stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(b.Args) == 0 {
+ return nil, &ProviderError{
+ Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+ }
+ }
+
+ cmdArgs = append(cmdArgs, b.Args...)
+ cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+ cmd.Env = os.Environ()
+
+ cmd.Stderr = os.Stderr // display stderr on console for MFA
+ cmd.Stdin = os.Stdin // enable stdin for MFA
+
+ return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Credentials object wrapping the
+// Provider.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating command
+// the Provider will use to retrieve credentials with.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+ var args []string
+
+ // Ensure that the command arguments are not set if the provided command is
+ // empty. This will error out when the command is executed since no
+ // arguments are specified.
+ if len(command) > 0 {
+ args = []string{command}
+ }
+
+ commandBuilder := DefaultNewCommandBuilder{
+ Args: args,
+ }
+ return NewProviderCommand(commandBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Credentials object with the
+// specified command, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+ p := &Provider{
+ commandBuilder: builder,
+ options: Options{
+ Timeout: DefaultTimeout,
+ },
+ }
+
+ for _, option := range options {
+ option(&p.options)
+ }
+
+ return p
+}
+
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+ // As of this writing, the Version key must be set to 1. This might
+ // increment over time as the structure evolves.
+ Version int
+
+ // The access key ID that identifies the temporary security credentials.
+ AccessKeyID string `json:"AccessKeyId"`
+
+ // The secret access key that can be used to sign requests.
+ SecretAccessKey string
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ SessionToken string
+
+ // The date on which the current credentials expire.
+ Expiration *time.Time
+
+ // The ID of the account for credentials
+ AccountID string `json:"AccountId"`
+}
+
+// Retrieve executes the credential process command and returns the
+// credentials, or error if the command fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ out, err := p.executeCredentialProcess(ctx)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &CredentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err),
+ }
+ }
+
+ if resp.Version != 1 {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("wrong version in process output (not 1)"),
+ }
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("missing AccessKeyId in process output"),
+ }
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return aws.Credentials{Source: ProviderName}, &ProviderError{
+ Err: fmt.Errorf("missing SecretAccessKey in process output"),
+ }
+ }
+
+ creds := aws.Credentials{
+ Source: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ AccountID: resp.AccountID,
+ }
+
+ // Handle expiration
+ if resp.Expiration != nil {
+ creds.CanExpire = true
+ creds.Expires = *resp.Expiration
+ }
+
+ return creds, nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) {
+ if p.options.Timeout >= 0 {
+ var cancelFunc func()
+ ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout)
+ defer cancelFunc()
+ }
+
+ cmd, err := p.commandBuilder.NewCommand(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // get creds json on process's stdout
+ output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte)))
+ if cmd.Stdout != nil {
+ cmd.Stdout = io.MultiWriter(cmd.Stdout, output)
+ } else {
+ cmd.Stdout = output
+ }
+
+ execCh := make(chan error, 1)
+ go executeCommand(cmd, execCh)
+
+ select {
+ case execError := <-execCh:
+ if execError == nil {
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return output.Bytes(), &ProviderError{
+ Err: fmt.Errorf("credential process timed out: %w", execError),
+ }
+ default:
+ return output.Bytes(), &ProviderError{
+ Err: fmt.Errorf("error in credential_process: %w", execError),
+ }
+ }
+ }
+
+ out := output.Bytes()
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`))
+ }
+
+ return out, nil
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceProcess}
+ }
+ return p.options.CredentialSources
+}
+
+func executeCommand(cmd *exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
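To make the Version-1 contract validated in Retrieve above concrete, a minimal sketch (the command path is a placeholder; any executable printing the documented JSON shape works):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
)

func main() {
	// The command must print JSON such as:
	//   {"Version":1,"AccessKeyId":"AKID","SecretAccessKey":"SECRET",
	//    "SessionToken":"TOKEN","Expiration":"2030-01-01T00:00:00Z"}
	provider := processcreds.NewProvider("/usr/local/bin/fetch-creds",
		func(o *processcreds.Options) {
			o.Timeout = 30 * time.Second // tighten the 1 minute default
		})

	creds, err := aws.NewCredentialsCache(provider).Retrieve(context.TODO())
	if err != nil {
		log.Fatalf("credential_process failed: %v", err)
	}
	log.Printf("expires=%v canExpire=%v", creds.Expires, creds.CanExpire)
}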
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
new file mode 100644
index 000000000..ece1e65f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
@@ -0,0 +1,81 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS
+// credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS
+// SSO login flow. The SDK provider expects that you have already performed the
+// SSO login flow using AWS CLI using the "aws sso login" command, or by some
+// other mechanism. The provider must find a valid non-expired access token for
+// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not
+// found, it is expired, or the file is malformed, an error will be returned.
+//
+// # Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile and referencing an sso-session:
+//
+// sso_session
+// sso_account_id
+// sso_role_name
+//
+// For example, the following defines a profile "devsso" and specifies the AWS
+// SSO parameters that defines the target account, role, sign-on portal, and
+// the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+// [profile devsso]
+// sso_session = dev-session
+// sso_role_name = SSOReadOnlyRole
+// sso_account_id = 123456789012
+//
+// [sso-session dev-session]
+// sso_start_url = https://my-sso-portal.awsapps.com/start
+// sso_region = us-east-1
+// sso_registration_scopes = sso:account:access
+//
+// Using the config module, you can load the AWS SDK shared configuration, and
+// specify that this profile be used to retrieve credentials. For example:
+//
+// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
+// if err != nil {
+// return err
+// }
+//
+// # Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application,
+// and provide the necessary information to load and retrieve temporary
+// credentials using an access token from ~/.aws/sso/cache.
+//
+// ssoClient := sso.NewFromConfig(cfg)
+// ssoOidcClient := ssooidc.NewFromConfig(cfg)
+// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+// if err != nil {
+// return err
+// }
+//
+// var provider aws.CredentialsProvider
+// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) {
+// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)
+// })
+//
+// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
+// provider = aws.NewCredentialsCache(provider)
+//
+// credentials, err := provider.Retrieve(context.TODO())
+// if err != nil {
+// return err
+// }
+//
+// It is important that you wrap the Provider with aws.CredentialsCache if you
+// are programmatically constructing the provider directly. This prevents your
+// application from re-reading the cached access token and requesting new
+// credentials each time the credentials are used.
+//
+// # Additional Resources
+//
+// Configuring the AWS CLI to use AWS Single Sign-On:
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+//
+// AWS Single Sign-On User Guide:
+// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
new file mode 100644
index 000000000..46ae2f923
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
@@ -0,0 +1,233 @@
+package ssocreds
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+)
+
+var osUserHomeDir = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token
+// file, or an error if unable to derive the path. The key is used to compute
+// a SHA1 value that is hex encoded.
+//
+// Derives the filepath using the key as:
+//
+// ~/.aws/sso/cache/<hex(sha1(key))>.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+ homeDir := osUserHomeDir()
+ if len(homeDir) == 0 {
+ return "", fmt.Errorf("unable to get USER's home directory for cached token")
+ }
+ hash := sha1.New()
+ if _, err := hash.Write([]byte(key)); err != nil {
+ return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err)
+ }
+
+ cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+ return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+ AccessToken string `json:"accessToken,omitempty"`
+ ExpiresAt *rfc3339 `json:"expiresAt,omitempty"`
+
+ RefreshToken string `json:"refreshToken,omitempty"`
+ ClientID string `json:"clientId,omitempty"`
+ ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type token struct {
+ tokenKnownFields
+ UnknownFields map[string]interface{} `json:"-"`
+}
+
+func (t token) MarshalJSON() ([]byte, error) {
+ fields := map[string]interface{}{}
+
+ setTokenFieldString(fields, "accessToken", t.AccessToken)
+ setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+ setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+ setTokenFieldString(fields, "clientId", t.ClientID)
+ setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+ for k, v := range t.UnknownFields {
+ if _, ok := fields[k]; ok {
+ return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+ }
+ fields[k] = v
+ }
+
+ return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+ if value == "" {
+ return
+ }
+ fields[key] = value
+}
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+ if value == nil {
+ return
+ }
+ fields[key] = value
+}
+
+func (t *token) UnmarshalJSON(b []byte) error {
+ var fields map[string]interface{}
+ if err := json.Unmarshal(b, &fields); err != nil {
+ // Propagate the parse error rather than silently discarding it.
+ return err
+ }
+
+ t.UnknownFields = map[string]interface{}{}
+
+ for k, v := range fields {
+ var err error
+ switch k {
+ case "accessToken":
+ err = getTokenFieldString(v, &t.AccessToken)
+ case "expiresAt":
+ err = getTokenFieldRFC3339(v, &t.ExpiresAt)
+ case "refreshToken":
+ err = getTokenFieldString(v, &t.RefreshToken)
+ case "clientId":
+ err = getTokenFieldString(v, &t.ClientID)
+ case "clientSecret":
+ err = getTokenFieldString(v, &t.ClientSecret)
+ default:
+ t.UnknownFields[k] = v
+ }
+
+ if err != nil {
+ return fmt.Errorf("field %q, %w", k, err)
+ }
+ }
+
+ return nil
+}
+
+func getTokenFieldString(v interface{}, value *string) error {
+ var ok bool
+ *value, ok = v.(string)
+ if !ok {
+ return fmt.Errorf("expect value to be string, got %T", v)
+ }
+ return nil
+}
+
+func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
+ var stringValue string
+ if err := getTokenFieldString(v, &stringValue); err != nil {
+ return err
+ }
+
+ timeValue, err := parseRFC3339(stringValue)
+ if err != nil {
+ return err
+ }
+
+ *value = &timeValue
+ return nil
+}
+
+func loadCachedToken(filename string) (token, error) {
+ fileBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err)
+ }
+
+ var t token
+ if err := json.Unmarshal(fileBytes, &t); err != nil {
+ return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err)
+ }
+
+ if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
+ return token{}, fmt.Errorf(
+ "cached SSO token must contain accessToken and expiresAt fields")
+ }
+
+ return t, nil
+}
+
+func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) {
+ tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+ if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
+ return err
+ }
+
+ if err := os.Rename(tmpFilename, filename); err != nil {
+ return fmt.Errorf("failed to replace old cached SSO token file, %w", err)
+ }
+
+ return nil
+}
+
+func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) {
+ var f *os.File
+ f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
+ if err != nil {
+ return fmt.Errorf("failed to create cached SSO token file %w", err)
+ }
+
+ defer func() {
+ closeErr := f.Close()
+ if err == nil && closeErr != nil {
+ err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr)
+ }
+ }()
+
+ encoder := json.NewEncoder(f)
+
+ if err = encoder.Encode(t); err != nil {
+ return fmt.Errorf("failed to serialize cached SSO token, %w", err)
+ }
+
+ return nil
+}
+
+type rfc3339 time.Time
+
+func parseRFC3339(v string) (rfc3339, error) {
+ parsed, err := time.Parse(time.RFC3339, v)
+ if err != nil {
+ return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err)
+ }
+
+ return rfc3339(parsed), nil
+}
+
+func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) {
+ var value string
+
+ // Use JSON unmarshal to unescape the quoted value making use of JSON's
+ // unquoting rules.
+ if err = json.Unmarshal(bytes, &value); err != nil {
+ return err
+ }
+
+ *r, err = parseRFC3339(value)
+
+ // err is the named return; propagate any parse failure.
+ return err
+}
+
+func (r *rfc3339) MarshalJSON() ([]byte, error) {
+ value := time.Time(*r).UTC().Format(time.RFC3339)
+
+ // Use JSON marshal to quote and escape the value, making use of JSON's
+ // quoting rules.
+ return json.Marshal(value)
+}
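To make the cache-path derivation above concrete, a small standalone sketch reproducing what StandardCachedTokenFilepath computes for a given key (the session name is a placeholder; the real function prefixes the user's home directory rather than "~"):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"path/filepath"
)

func main() {
	key := "dev-session" // sso_session name, or start URL for legacy profiles

	sum := sha1.Sum([]byte(key))
	name := hex.EncodeToString(sum[:]) + ".json"

	// Same relative path StandardCachedTokenFilepath("dev-session") derives.
	fmt.Println(filepath.Join("~", ".aws", "sso", "cache", name))
}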
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
new file mode 100644
index 000000000..3ed9cbb3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
@@ -0,0 +1,165 @@
+package ssocreds
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of
+// credentials.
+const ProviderName = "SSOProvider"
+
+// GetRoleCredentialsAPIClient is an API client that implements the
+// GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+ GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) (
+ *sso.GetRoleCredentialsOutput, error,
+ )
+}
+
+// Options is the Provider options structure.
+type Options struct {
+ // The Client which is configured for the AWS Region where the AWS SSO user
+ // portal is located.
+ Client GetRoleCredentialsAPIClient
+
+ // The AWS account that is assigned to the user.
+ AccountID string
+
+ // The role name that is assigned to the user.
+ RoleName string
+
+ // The URL that points to the organization's AWS Single Sign-On (AWS SSO)
+ // user portal.
+ StartURL string
+
+ // The filepath the cached token will be retrieved from. If unset, the
+ // Provider will use the startURL to determine the filepath:
+ //
+ // ~/.aws/sso/cache/<hex(sha1(startURL))>.json
+ //
+ // If custom cached token filepath is used, the Provider's startUrl
+ // parameter will be ignored.
+ CachedTokenFilepath string
+
+ // Used by the SSOCredentialProvider if a token configuration
+ // profile is used in the shared config
+ SSOTokenProvider *SSOTokenProvider
+
+ // The chain of providers that was used to create this provider.
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS
+// credentials by exchanging an SSO login token.
+type Provider struct {
+ options Options
+
+ cachedTokenFilepath string
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The
+// provided client is expected to be configured for the AWS Region where the
+// AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+ options := Options{
+ Client: client,
+ AccountID: accountID,
+ RoleName: roleName,
+ StartURL: startURL,
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &Provider{
+ options: options,
+ cachedTokenFilepath: options.CachedTokenFilepath,
+ }
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon
+// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
+// in ~/.aws/sso/cache. However, if a token provider configuration exists
+// in the shared config, then we ought to use the token provider rather than
+// direct access to the cached token.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ var accessToken *string
+ if p.options.SSOTokenProvider != nil {
+ token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+ accessToken = &token.Value
+ } else {
+ if p.cachedTokenFilepath == "" {
+ cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+ if err != nil {
+ return aws.Credentials{}, &InvalidTokenError{Err: err}
+ }
+ p.cachedTokenFilepath = cachedTokenFilepath
+ }
+
+ tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
+ if err != nil {
+ return aws.Credentials{}, &InvalidTokenError{Err: err}
+ }
+
+ if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
+ return aws.Credentials{}, &InvalidTokenError{}
+ }
+ accessToken = &tokenFile.AccessToken
+ }
+
+ output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+ AccessToken: accessToken,
+ AccountId: &p.options.AccountID,
+ RoleName: &p.options.RoleName,
+ })
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ return aws.Credentials{
+ AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId),
+ SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+ SessionToken: aws.ToString(output.RoleCredentials.SessionToken),
+ CanExpire: true,
+ Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+ Source: ProviderName,
+ AccountID: p.options.AccountID,
+ }, nil
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceSSO}
+ }
+ return p.options.CredentialSources
+}
+
+// InvalidTokenError is the error type that is returned if the loaded token has
+// expired or is otherwise invalid. To refresh the SSO session run AWS SSO
+// login with the corresponding profile.
+type InvalidTokenError struct {
+ Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+ return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+ const msg = "the SSO session has expired or is invalid"
+ if i.Err == nil {
+ return msg
+ }
+ return msg + ": " + i.Err.Error()
+}
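A short sketch of how a caller might distinguish an expired SSO session from other failures (account ID, role name, and start URL are placeholder values):

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	p := ssocreds.New(sso.NewFromConfig(cfg), "123456789012",
		"SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")

	if _, err := p.Retrieve(ctx); err != nil {
		var invalid *ssocreds.InvalidTokenError
		if errors.As(err, &invalid) {
			// Cached token missing, malformed, or expired.
			log.Fatalf("run `aws sso login` to refresh the session: %v", err)
		}
		log.Fatal(err)
	}
}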
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
new file mode 100644
index 000000000..7f4fc5467
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
@@ -0,0 +1,147 @@
+package ssocreds
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc"
+ "github.com/aws/smithy-go/auth/bearer"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+ CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
+ *ssooidc.CreateTokenOutput, error,
+ )
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+ // Client that can be overridden
+ Client CreateTokenAPIClient
+
+ // The set of API Client options to be applied when invoking the
+ // CreateToken operation.
+ ClientOptions []func(*ssooidc.Options)
+
+ // The path the file containing the cached SSO token will be read from.
+ // Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+ CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
+// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
+// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
+// the smithy-go TokenCache, if the external configuration loaded configured
+// for an SSO session.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+ options SSOTokenProviderOptions
+}
+
+var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the cached SSO token stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+ options := SSOTokenProviderOptions{
+ Client: client,
+ CachedTokenFilepath: cachedTokenFilepath,
+ }
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ provider := &SSOTokenProvider{
+ options: options,
+ }
+
+ return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) {
+ cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+ if err != nil {
+ return bearer.Token{}, err
+ }
+
+ if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+ cachedToken, err = p.refreshToken(ctx, cachedToken)
+ if err != nil {
+ return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err)
+ }
+ }
+
+ expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt))
+ return bearer.Token{
+ Value: cachedToken.AccessToken,
+ CanExpire: !expiresAt.IsZero(),
+ Expires: expiresAt,
+ }, nil
+}
+
+func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) {
+ if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" {
+ return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+ }
+
+ createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{
+ ClientId: &cachedToken.ClientID,
+ ClientSecret: &cachedToken.ClientSecret,
+ RefreshToken: &cachedToken.RefreshToken,
+ GrantType: aws.String("refresh_token"),
+ }, p.options.ClientOptions...)
+ if err != nil {
+ return token{}, fmt.Errorf("unable to refresh SSO token, %w", err)
+ }
+
+ expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second)
+
+ cachedToken.AccessToken = aws.ToString(createResult.AccessToken)
+ cachedToken.ExpiresAt = (*rfc3339)(&expiresAt)
+ cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken)
+
+ fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
+ if err != nil {
+		return token{}, fmt.Errorf("failed to stat cached SSO token file, %w", err)
+ }
+
+ if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil {
+ return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err)
+ }
+
+ return cachedToken, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
new file mode 100644
index 000000000..a469abdb7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
@@ -0,0 +1,63 @@
+package credentials
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+	// StaticCredentialsName provides the name of the Static credentials provider.
+ StaticCredentialsName = "StaticCredentials"
+)
+
+// StaticCredentialsEmptyError is emitted when static credentials are empty.
+type StaticCredentialsEmptyError struct{}
+
+func (*StaticCredentialsEmptyError) Error() string {
+ return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+ Value aws.Credentials
+	// These values are for reporting purposes and are not meant to be set directly.
+ Source []aws.CredentialSource
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (s StaticCredentialsProvider) ProviderSources() []aws.CredentialSource {
+ if s.Source == nil {
+ return []aws.CredentialSource{aws.CredentialSourceCode} // If no source has been set, assume this is used directly which means hardcoded creds
+ }
+ return s.Source
+}
+
+// NewStaticCredentialsProvider returns a StaticCredentialsProvider initialized with the AWS
+// credentials passed in.
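+//
+// A minimal usage sketch (the key and secret values are placeholders):
+//
+//	provider := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsProvider(provider),
+//	)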
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+ return StaticCredentialsProvider{
+ Value: aws.Credentials{
+ AccessKeyID: key,
+ SecretAccessKey: secret,
+ SessionToken: session,
+ },
+ }
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+ v := s.Value
+ if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+ return aws.Credentials{
+ Source: StaticCredentialsName,
+ }, &StaticCredentialsEmptyError{}
+ }
+
+ if len(v.Source) == 0 {
+ v.Source = StaticCredentialsName
+ }
+
+ return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..1ccf71e77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,338 @@
+// Package stscreds provides credential Providers for retrieving STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that, per instance of credentials.Credentials, all
+// requests to refresh the credentials will be synchronized. But the SDK is
+// unable to ensure synchronized usage of the AssumeRoleProvider if the value
+// is shared between multiple Credentials or service clients.
+//
+// # Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDK's stscreds package.
+//
+// // Initial credentials loaded from SDK's default credential chain. Such as
+// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+//	// Role. These credentials will be used to make the STS Assume Role API call.
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN.
+// stsSvc := sts.NewFromConfig(cfg)
+// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+// cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with custom MFA Token provider
+//
+// To assume an IAM role with a MFA token you can either specify a custom MFA
+// token provider or use the SDK's built in StdinTokenProvider that will prompt
+// the user for a token code each time the credentials need to be refreshed.
+// Specifying a custom token provider allows you to control where the token
+// code is retrieved from, and how it is refreshed.
+//
+// With a custom token provider, the provider is responsible for refreshing the
+// token code when called.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// staticTokenProvider := func() (string, error) {
+// return someTokenCode, nil
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN using the MFA token code provided.
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+// o.SerialNumber = aws.String("myTokenSerialNumber")
+// o.TokenProvider = staticTokenProvider
+// })
+//
+// cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN using the MFA token code provided.
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+// o.SerialNumber = aws.String("myTokenSerialNumber")
+// o.TokenProvider = stscreds.StdinTokenProvider
+// })
+//
+// cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no
+// attempt to make prompts from stdin atomic across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Printf("Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName provides the name of the AssumeRole provider.
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
+type AssumeRoleAPIClient interface {
+ AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials will be
+// valid for. This value is only used by AssumeRoleProvider for specifying the
+// default expiry duration of an assume role.
+//
+// Other providers such as WebIdentityRoleProvider do not use this value, and
+// instead rely on the STS API's default parameter handling to assign a
+// default value.
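+//
+// Callers can override the duration per provider via the options (a sketch;
+// the one-hour value is illustrative):
+//
+//	creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn",
+//		func(o *stscreds.AssumeRoleOptions) {
+//			o.Duration = time.Hour
+//		})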
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared
+// credentials file configures an assume role. See the config package docs for
+// how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ options AssumeRoleOptions
+}
+
+// AssumeRoleOptions is the configurable options for AssumeRoleProvider
+type AssumeRoleOptions struct {
+ // Client implementation of the AssumeRole operation. Required
+ Client AssumeRoleAPIClient
+
+ // IAM Role ARN to be assumed. Required
+ RoleARN string
+
+ // Session name, if you wish to uniquely identify this session.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The ARNs of IAM managed policies you want to use as managed session policies.
+ // The policies must exist in the same account as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies can't exceed 2,048 characters.
+ //
+ // An AWS conversion compresses the passed session policies and session tags
+ // into a packed binary format that has a separate limit. Your request can fail
+ // for this limit even if your plain text meets the other requirements. The
+ // PackedPolicySize response element indicates by percentage how close the policies
+ // and tags for your request are to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyARNs []types.PolicyDescriptorType
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The source identity specified by the principal that is calling the AssumeRole
+ // operation. You can require users to specify a source identity when they assume a
+ // role. You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. You can use source identity information in CloudTrail logs to determine
+ // who took actions with a role. You can use the aws:SourceIdentity condition key
+ // to further control access to Amazon Web Services resources based on the value of
+ // source identity. For more information about using source identity, see Monitor
+ // and control actions taken with assumed roles
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+ // in the IAM User Guide.
+ SourceIdentity *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+	// This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is set.
+ TokenProvider func() (string, error)
+
+ // A list of session tags that you want to pass. Each session tag consists of a key
+ // name and an associated value. For more information about session tags, see
+ // Tagging STS Sessions
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+ // IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+ Tags []types.Tag
+
+ // A list of keys for session tags that you want to set as transitive. If you set a
+ // tag key as transitive, the corresponding key and value passes to subsequent
+ // sessions in a role chain. For more information, see Chaining Roles with Session
+ // Tags
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+ // in the IAM User Guide. This parameter is optional.
+ TransitiveTagKeys []string
+
+ // The chain of providers that was used to create this provider
+	// The chain of providers that was used to create this provider.
+	// These values are for reporting purposes and are not meant to be set directly.
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming an IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider {
+ o := AssumeRoleOptions{
+ Client: client,
+ RoleARN: roleARN,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &AssumeRoleProvider{
+ options: o,
+ }
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ // Apply defaults where parameters are not set.
+ if len(p.options.RoleSessionName) == 0 {
+ // Try to work out a role name that will hopefully end up unique.
+ p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano())
+ }
+ if p.options.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.options.Duration = DefaultDuration
+ }
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)),
+ PolicyArns: p.options.PolicyARNs,
+ RoleArn: aws.String(p.options.RoleARN),
+ RoleSessionName: aws.String(p.options.RoleSessionName),
+ ExternalId: p.options.ExternalID,
+ SourceIdentity: p.options.SourceIdentity,
+ Tags: p.options.Tags,
+ TransitiveTagKeys: p.options.TransitiveTagKeys,
+ }
+ if p.options.Policy != nil {
+ input.Policy = p.options.Policy
+ }
+ if p.options.SerialNumber != nil {
+ if p.options.TokenProvider != nil {
+ input.SerialNumber = p.options.SerialNumber
+ code, err := p.options.TokenProvider()
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set")
+ }
+ }
+
+ resp, err := p.options.Client.AssumeRole(ctx, input)
+ if err != nil {
+ return aws.Credentials{Source: ProviderName}, err
+ }
+
+ var accountID string
+ if resp.AssumedRoleUser != nil {
+ accountID = getAccountID(resp.AssumedRoleUser)
+ }
+
+ return aws.Credentials{
+ AccessKeyID: *resp.Credentials.AccessKeyId,
+ SecretAccessKey: *resp.Credentials.SecretAccessKey,
+ SessionToken: *resp.Credentials.SessionToken,
+ Source: ProviderName,
+
+ CanExpire: true,
+ Expires: *resp.Credentials.Expiration,
+ AccountID: accountID,
+ }, nil
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *AssumeRoleProvider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRole}
+ } // If no source has been set, assume this is used directly which means just call to assume role
+ return append(p.options.CredentialSources, aws.CredentialSourceSTSAssumeRole)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000..5f4286dda
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,181 @@
+package stscreds
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode()
+
+const (
+ // WebIdentityProviderName is the web identity provider name
+ WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation.
+type AssumeRoleWithWebIdentityAPIClient interface {
+ AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error)
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+ options WebIdentityRoleOptions
+}
+
+// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider
+type WebIdentityRoleOptions struct {
+ // Client implementation of the AssumeRoleWithWebIdentity operation. Required
+ Client AssumeRoleWithWebIdentityAPIClient
+
+ // JWT Token Provider. Required
+ TokenRetriever IdentityTokenRetriever
+
+ // IAM Role ARN to assume. Required
+ RoleARN string
+
+ // Session name, if you wish to uniquely identify this session.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. STS will assign a default expiry
+ // duration if this value is unset. This is different from the Duration
+ // option of AssumeRoleProvider, which automatically assigns 15 minutes if
+ // Duration is unset.
+ //
+ // See the STS AssumeRoleWithWebIdentity API reference guide for more
+ // information on defaults.
+ // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+ Duration time.Duration
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you
+ // want to use as managed session policies. The policies must exist in the
+ // same account as the role.
+ PolicyARNs []types.PolicyDescriptorType
+
+ // The chain of providers that was used to create this provider
+	// The chain of providers that was used to create this provider.
+	// These values are for reporting purposes and are not meant to be set directly.
+}
+
+// IdentityTokenRetriever is an interface for retrieving a JWT
+type IdentityTokenRetriever interface {
+ GetIdentityToken() ([]byte, error)
+}
+
+// IdentityTokenFile is for retrieving an identity token from the given file name
+type IdentityTokenFile string
+
+// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte
+func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) {
+ b, err := ioutil.ReadFile(string(j))
+ if err != nil {
+ return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err)
+ }
+
+ return b, nil
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with
+// the provided AssumeRoleWithWebIdentityAPIClient.
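+//
+// A minimal usage sketch (the role ARN and token file path are placeholders):
+//
+//	provider := stscreds.NewWebIdentityRoleProvider(
+//		sts.NewFromConfig(cfg),
+//		"arn:aws:iam::123456789012:role/myRole",
+//		stscreds.IdentityTokenFile("/var/run/secrets/eks.amazonaws.com/serviceaccount/token"),
+//	)
+//	cfg.Credentials = aws.NewCredentialsCache(provider)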
+func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider {
+ o := WebIdentityRoleOptions{
+ Client: client,
+ RoleARN: roleARN,
+ TokenRetriever: tokenRetriever,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &WebIdentityRoleProvider{options: o}
+}
+
+// Retrieve will attempt to assume a role using the identity token retrieved
+// by the configured IdentityTokenRetriever. If the token cannot be retrieved,
+// an error is returned.
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ b, err := p.options.TokenRetriever.GetIdentityToken()
+ if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provided source, %w", err)
+ }
+
+ sessionName := p.options.RoleSessionName
+ if len(sessionName) == 0 {
+ // session name is used to uniquely identify a session. This simply
+ // uses unix time in nanoseconds to uniquely identify sessions.
+ sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+ }
+ input := &sts.AssumeRoleWithWebIdentityInput{
+ PolicyArns: p.options.PolicyARNs,
+ RoleArn: &p.options.RoleARN,
+ RoleSessionName: &sessionName,
+ WebIdentityToken: aws.String(string(b)),
+ }
+ if p.options.Duration != 0 {
+ // If set use the value, otherwise STS will assign a default expiration duration.
+ input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
+ }
+ if p.options.Policy != nil {
+ input.Policy = p.options.Policy
+ }
+
+	// The InvalidIdentityToken error is a temporary error that can occur when
+	// assuming a role with a JWT web identity token, so it is retried.
+	resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
+ options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+ })
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+ }
+
+ var accountID string
+ if resp.AssumedRoleUser != nil {
+ accountID = getAccountID(resp.AssumedRoleUser)
+ }
+
+ value := aws.Credentials{
+ AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId),
+ SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey),
+ SessionToken: aws.ToString(resp.Credentials.SessionToken),
+ Source: WebIdentityProviderName,
+ CanExpire: true,
+ Expires: *resp.Credentials.Expiration,
+ AccountID: accountID,
+ }
+ return value, nil
+}
+
+// getAccountID extracts the account ID from an ARN with the format
+// "arn:partition:service:region:account-id:[resource-section]".
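+// For example, "arn:aws:sts::123456789012:assumed-role/myRole/mySession"
+// yields "123456789012".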
+func getAccountID(u *types.AssumedRoleUser) string {
+ if u.Arn == nil {
+ return ""
+ }
+ parts := strings.Split(*u.Arn, ":")
+ if len(parts) < 5 {
+ return ""
+ }
+ return parts[4]
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *WebIdentityRoleProvider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRoleWebID}
+ }
+ return p.options.CredentialSources
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
new file mode 100644
index 000000000..cfb9d77fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -0,0 +1,512 @@
+# v1.18.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.18.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2025-07-29)
+
+* **Feature**: Add config switch `DisableDefaultMaxBackoff` that allows you to disable the default maximum backoff (1 second) for IMDS calls retry attempt
+
+# v1.17.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.33 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.32 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.31 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.30 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.29 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.28 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.27 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.26 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.25 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.16.24 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.23 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.22 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.21 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.20 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.19 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.18 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.17 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2024-03-21)
+
+* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls.
+
+# v1.15.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.11 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.8 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.7 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.3 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-11-02)
+
+* No change notes available for this release.
+
+# v1.14.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2023-03-14)
+
+* **Feature**: Add flag to disable IMDSv1 fallback
+
+# v1.12.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-10-24)
+
+* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-11)
+
+* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout.
+* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. Fixes #1253
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
new file mode 100644
index 000000000..75edc4e9d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
@@ -0,0 +1,358 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID provides the unique name of this API client
+const ServiceID = "ec2imds"
+
+// Client provides the API client for interacting with the Amazon EC2 Instance
+// Metadata Service API.
+type Client struct {
+ options Options
+}
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or uses the default behavior.
+type ClientEnableState = internalconfig.ClientEnableState
+
+// Enumeration values for ClientEnableState
+const (
+ ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior
+ ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled
+ ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled
+)
+
+// EndpointModeState is an enum configuration variable describing the client endpoint mode.
+// Not configurable directly, but used when using NewFromConfig.
+type EndpointModeState = internalconfig.EndpointModeState
+
+// Enumeration values for EndpointModeState
+const (
+ EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset
+ EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4
+ EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6
+)
+
+const (
+ disableClientEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+ // Client endpoint options
+ endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+ defaultIPv4Endpoint = "http://169.254.169.254"
+ defaultIPv6Endpoint = "http://[fd00:ec2::254]"
+)
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ options.HTTPClient = resolveHTTPClient(options.HTTPClient)
+
+ if options.Retryer == nil {
+ options.Retryer = retry.NewStandard()
+ }
+ if !options.DisableDefaultMaxBackoff {
+ options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second)
+ }
+
+ if options.ClientEnableState == ClientDefaultEnableState {
+ if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") {
+ options.ClientEnableState = ClientDisabled
+ }
+ }
+
+ if len(options.Endpoint) == 0 {
+ if v := os.Getenv(endpointEnvVar); len(v) != 0 {
+ options.Endpoint = v
+ }
+ }
+
+ client := &Client{
+ options: options,
+ }
+
+ if client.options.tokenProvider == nil && !client.options.disableAPIToken {
+ client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL)
+ }
+
+ return client
+}
+
+// NewFromConfig returns an initialized Client based on the AWS SDK config, and
+// functional options. Provide additional functional options to further
+// configure the behavior of the client, such as changing the client's endpoint
+// or adding custom middleware behavior.
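+//
+// A minimal usage sketch (assuming the process runs on an EC2 instance):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	client := imds.NewFromConfig(cfg)
+//	region, err := client.GetRegion(context.TODO(), &imds.GetRegionInput{})
+//	// region.Region holds the instance's region, e.g. "us-east-1".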
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
+ HTTPClient: cfg.HTTPClient,
+ ClientLogMode: cfg.ClientLogMode,
+ Logger: cfg.Logger,
+ }
+
+ if cfg.Retryer != nil {
+ opts.Retryer = cfg.Retryer()
+ }
+
+ resolveClientEnableState(cfg, &opts)
+ resolveEndpointConfig(cfg, &opts)
+ resolveEndpointModeConfig(cfg, &opts)
+ resolveEnableFallback(cfg, &opts)
+
+ return New(opts, optFns...)
+}
+
+// Options provides the fields for configuring the API client's behavior.
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation
+ // call to modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The endpoint the client will use to retrieve EC2 instance metadata.
+ //
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified, it overrides EndpointMode.
+ //
+ // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT
+ // has a value the client will use the value of the environment variable as
+ // the endpoint for operation calls.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+ Endpoint string
+
+ // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field.
+ //
+ // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint.
+ // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint.
+ //
+	// By default, if EndpointMode is not set (EndpointModeStateUnset), the
+	// default endpoint selection mode is EndpointModeStateIPv4.
+ EndpointMode EndpointModeState
+
+ // The HTTP client to invoke API calls with. Defaults to client's default
+ // HTTP implementation if nil.
+ HTTPClient HTTPClient
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+	// Controls whether the EC2 Instance Metadata client is enabled. The
+	// client defaults to enabled if not set to ClientDisabled. When the
+	// client is disabled it will return an error for all operation calls.
+ //
+ // If ClientEnableState value is ClientDefaultEnableState (default value),
+ // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+ // "true", the client will be disabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
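+	//
+	// The client can also be disabled in code (a sketch):
+	//
+	//	client := imds.New(imds.Options{
+	//		ClientEnableState: imds.ClientDisabled,
+	//	})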
+ ClientEnableState ClientEnableState
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // Configure IMDSv1 fallback behavior. By default, the client will attempt
+ // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+ // the client will return any errors encountered from attempting to fetch a token
+ // instead of silently using the insecure data flow of IMDSv1.
+ //
+ // See [configuring IMDS] for more information.
+ //
+ // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ EnableFallback aws.Ternary
+
+ // By default, all IMDS client operations enforce a 5-second timeout. You
+ // can disable that behavior with this setting.
+ DisableDefaultTimeout bool
+
+ // By default all IMDS client operations enforce a 1-second retry delay at maximum.
+ // You can disable that behavior with this setting.
+ DisableDefaultMaxBackoff bool
+
+ // provides the caching of API tokens used for operation calls. If unset,
+ // the API token will not be retrieved for the operation.
+ tokenProvider *tokenProvider
+
+ // option to disable the API token provider for testing.
+ disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+ return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options),
+ stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.ClientEnableState == ClientDisabled {
+ return nil, metadata, &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: fmt.Errorf(
+ "access disabled to EC2 IMDS via client option, or %q environment variable",
+ disableClientEnvVar),
+ }
+ }
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+ result, metadata, err = handler.Handle(ctx, params)
+ if err != nil {
+ return nil, metadata, &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ return result, metadata, err
+}
+
+const (
+ // HTTP client constants
+ defaultDialerTimeout = 250 * time.Millisecond
+ defaultResponseHeaderTimeout = 500 * time.Millisecond
+)
+
+func resolveHTTPClient(client HTTPClient) HTTPClient {
+ if client == nil {
+ client = awshttp.NewBuildableClient()
+ }
+
+ if c, ok := client.(*awshttp.BuildableClient); ok {
+ client = c.
+ WithDialerOptions(func(d *net.Dialer) {
+ // Use a custom Dial timeout for the EC2 Metadata service to account
+ // for the possibility the application might not be running in an
+ // environment with the service present. The client should fail fast in
+ // this case.
+ d.Timeout = defaultDialerTimeout
+ }).
+ WithTransportOptions(func(tr *http.Transport) {
+ // Use a custom Transport timeout for the EC2 Metadata service to
+ // account for the possibility that the application might be running in
+ // a container, and EC2Metadata service drops the connection after a
+ // single IP Hop. The client should fail fast in this case.
+ tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout
+ })
+ }
+
+ return client
+}
+
+func resolveClientEnableState(cfg aws.Config, options *Options) error {
+ if options.ClientEnableState != ClientDefaultEnableState {
+ return nil
+ }
+ value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources)
+ if err != nil || !found {
+ return err
+ }
+ options.ClientEnableState = value
+ return nil
+}
+
+func resolveEndpointModeConfig(cfg aws.Config, options *Options) error {
+ if options.EndpointMode != EndpointModeStateUnset {
+ return nil
+ }
+ value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources)
+ if err != nil || !found {
+ return err
+ }
+ options.EndpointMode = value
+ return nil
+}
+
+func resolveEndpointConfig(cfg aws.Config, options *Options) error {
+ if len(options.Endpoint) != 0 {
+ return nil
+ }
+ value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources)
+ if err != nil || !found {
+ return err
+ }
+ options.Endpoint = value
+ return nil
+}
+
+func resolveEnableFallback(cfg aws.Config, options *Options) {
+ if options.EnableFallback != aws.UnknownTernary {
+ return
+ }
+
+ disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources)
+ if !ok {
+ return
+ }
+
+ if disabled {
+ options.EnableFallback = aws.FalseTernary
+ } else {
+ options.EnableFallback = aws.TrueTernary
+ }
+}
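Reviewer note: a minimal sketch of how downstream code is expected to construct this newly vendored client. The `config.LoadDefaultConfig` call and the custom endpoint value are illustrative assumptions, not part of this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Resolve shared AWS configuration (env vars, shared config files, ...).
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// NewFromConfig copies the relevant settings from cfg and resolves the
	// IMDS-specific options (endpoint, endpoint mode, enable state, fallback).
	client := imds.NewFromConfig(cfg, func(o *imds.Options) {
		// Functional options can further adjust behavior, e.g. the endpoint.
		o.Endpoint = "http://169.254.169.254"
	})
	_ = client
}
```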
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
new file mode 100644
index 000000000..af58b6bb1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
@@ -0,0 +1,77 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getDynamicDataPath = "/latest/dynamic"
+
+// GetDynamicData uses the path provided to request dynamic data from the
+// EC2 instance metadata service. The content is returned as an
+// io.ReadCloser in the output, or an error if the request fails.
+func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) {
+ if params == nil {
+ params = &GetDynamicDataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns,
+ addGetDynamicDataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetDynamicDataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetDynamicDataInput provides the input parameters for the GetDynamicData
+// operation.
+type GetDynamicDataInput struct {
+ // The relative dynamic data path to retrieve. Can be an empty string to
+ // retrieve a response containing a newline-separated list of available
+ // dynamic data resources.
+ //
+ // Must not include the dynamic data base path.
+ //
+ // May include a leading slash. If Path includes a trailing slash, the
+ // trailing slash will be included in the request for the resource.
+ Path string
+}
+
+// GetDynamicDataOutput provides the output parameters for the GetDynamicData
+// operation.
+type GetDynamicDataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ "GetDynamicData",
+ buildGetDynamicDataPath,
+ buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+ p, ok := params.(*GetDynamicDataInput)
+ if !ok {
+ return "", fmt.Errorf("unknown parameter type %T", params)
+ }
+
+ return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetDynamicDataOutput{
+ Content: resp.Body,
+ }, nil
+}
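Reviewer note: a usage sketch for GetDynamicData, assuming a `client` built as in the earlier example. Per the Path documentation above, an empty Path lists the available dynamic data resources:

```go
package example

import (
	"context"
	"fmt"
	"io"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// listDynamicData prints the newline-separated dynamic data resources.
func listDynamicData(ctx context.Context, client *imds.Client) error {
	out, err := client.GetDynamicData(ctx, &imds.GetDynamicDataInput{Path: ""})
	if err != nil {
		return err
	}
	defer out.Content.Close()

	body, err := io.ReadAll(out.Content)
	if err != nil {
		return err
	}
	fmt.Printf("dynamic data resources:\n%s\n", body)
	return nil
}
```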
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
new file mode 100644
index 000000000..5111cc90c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -0,0 +1,103 @@
+package imds
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves the IAM information associated with the
+// instance. An error is returned if the request fails or the response
+// cannot be parsed.
+func (c *Client) GetIAMInfo(
+ ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+ *GetIAMInfoOutput, error,
+) {
+ if params == nil {
+ params = &GetIAMInfoInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+ addGetIAMInfoMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetIAMInfoOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+ IAMInfo
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ "GetIAMInfo",
+ buildGetIAMInfoPath,
+ buildGetIAMInfoOutput,
+ )
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+ return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+ defer func() {
+ closeErr := resp.Body.Close()
+ if err == nil {
+ err = closeErr
+ } else if closeErr != nil {
+ err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+ }
+ }()
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(resp.Body, ringBuffer)
+
+ imdsResult := &GetIAMInfoOutput{}
+ if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+ return nil, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode instance identity document, %w", err),
+ Snapshot: ringBuffer.Bytes(),
+ }
+ }
+ // Any code other than "success" is an error
+ if !strings.EqualFold(imdsResult.Code, "success") {
+ return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+ imdsResult.Code)
+ }
+
+ return imdsResult, nil
+}
+
+// IAMInfo provides the shape for unmarshaling IAM info from the metadata
+// API.
+type IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
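Reviewer note: a usage sketch for GetIAMInfo; the `client` parameter is an assumption for illustration:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// printInstanceProfile shows the IAM instance profile attached to the instance.
func printInstanceProfile(ctx context.Context, client *imds.Client) error {
	info, err := client.GetIAMInfo(ctx, nil) // nil params is accepted and defaulted
	if err != nil {
		return err
	}
	// IAMInfo is embedded in the output, so its fields are promoted.
	fmt.Println("profile ARN:", info.InstanceProfileArn)
	return nil
}
```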
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
new file mode 100644
index 000000000..dc8c09edf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -0,0 +1,110 @@
+package imds
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document"
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *Client) GetInstanceIdentityDocument(
+ ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options),
+) (
+ *GetInstanceIdentityDocumentOutput, error,
+) {
+ if params == nil {
+ params = &GetInstanceIdentityDocumentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns,
+ addGetInstanceIdentityDocumentMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetInstanceIdentityDocumentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetInstanceIdentityDocumentInput provides the input parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentInput struct{}
+
+// GetInstanceIdentityDocumentOutput provides the output parameters for
+// GetInstanceIdentityDocument operation.
+type GetInstanceIdentityDocumentOutput struct {
+ InstanceIdentityDocument
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ "GetInstanceIdentityDocument",
+ buildGetInstanceIdentityDocumentPath,
+ buildGetInstanceIdentityDocumentOutput,
+ )
+}
+
+func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) {
+ return getInstanceIdentityDocumentPath, nil
+}
+
+func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+ defer func() {
+ closeErr := resp.Body.Close()
+ if err == nil {
+ err = closeErr
+ } else if closeErr != nil {
+ err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+ }
+ }()
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(resp.Body, ringBuffer)
+
+ output := &GetInstanceIdentityDocumentOutput{}
+ if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil {
+ return nil, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode instance identity document, %w", err),
+ Snapshot: ringBuffer.Bytes(),
+ }
+ }
+
+ return output, nil
+}
+
+// InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document.
+type InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
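Reviewer note: a short sketch of reading the identity document fields, assuming a `client` as before. The embedded InstanceIdentityDocument promotes its fields onto the output:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// whereAmI reads the identity document and reports placement details.
func whereAmI(ctx context.Context, client *imds.Client) error {
	doc, err := client.GetInstanceIdentityDocument(ctx, nil)
	if err != nil {
		return err
	}
	fmt.Printf("instance %s (%s) in %s/%s\n",
		doc.InstanceID, doc.InstanceType, doc.Region, doc.AvailabilityZone)
	return nil
}
```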
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
new file mode 100644
index 000000000..869bfc9fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
@@ -0,0 +1,77 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getMetadataPath = "/latest/meta-data"
+
+// GetMetadata uses the path provided to request information from the Amazon
+// EC2 Instance Metadata Service. The content is returned as an io.ReadCloser
+// in the output, or an error if the request fails.
+func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) {
+ if params == nil {
+ params = &GetMetadataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns,
+ addGetMetadataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetMetadataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetMetadataInput provides the input parameters for the GetMetadata
+// operation.
+type GetMetadataInput struct {
+ // The relative metadata path to retrieve. Can be an empty string to
+ // retrieve a response containing a newline-separated list of available
+ // metadata resources.
+ //
+ // Must not include the metadata base path.
+ //
+ // May include a leading slash. If Path includes a trailing slash, the
+ // trailing slash will be included in the request for the resource.
+ Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ "GetMetadata",
+ buildGetMetadataPath,
+ buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+ p, ok := params.(*GetMetadataInput)
+ if !ok {
+ return "", fmt.Errorf("unknown parameter type %T", params)
+ }
+
+ return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetMetadataOutput{
+ Content: resp.Body,
+ }, nil
+}
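Reviewer note: a usage sketch for GetMetadata with a single key; `client` is assumed from the construction example, and Path is relative to the /latest/meta-data base path:

```go
package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// instanceID fetches one metadata key and returns it as a string.
func instanceID(ctx context.Context, client *imds.Client) (string, error) {
	out, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"})
	if err != nil {
		return "", err
	}
	defer out.Content.Close()

	id, err := io.ReadAll(out.Content)
	if err != nil {
		return "", err
	}
	return string(id), nil
}
```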
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
new file mode 100644
index 000000000..8c0572bb5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -0,0 +1,73 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves the region the instance is running in, via the
+// instance identity document. An error is returned if the request fails or
+// the response cannot be parsed.
+func (c *Client) GetRegion(
+ ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+ *GetRegionOutput, error,
+) {
+ if params == nil {
+ params = &GetRegionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+ addGetRegionMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetRegionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
+
+// GetRegionOutput provides the output parameters for GetRegion operation.
+type GetRegionOutput struct {
+ Region string
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetRegionMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ "GetRegion",
+ buildGetInstanceIdentityDocumentPath,
+ buildGetRegionOutput,
+ )
+}
+
+func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) {
+ out, err := buildGetInstanceIdentityDocumentOutput(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ result, ok := out.(*GetInstanceIdentityDocumentOutput)
+ if !ok {
+ return nil, fmt.Errorf("unexpected instance identity document type, %T", out)
+ }
+
+ region := result.Region
+ if len(region) == 0 {
+ return "", fmt.Errorf("instance metadata did not return a region value")
+ }
+
+ return &GetRegionOutput{
+ Region: region,
+ }, nil
+}
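Reviewer note: GetRegion is the common entry point for region auto-discovery on EC2; a minimal sketch, with `client` assumed as before:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// regionOf returns the region reported by the instance identity document.
func regionOf(ctx context.Context, client *imds.Client) (string, error) {
	out, err := client.GetRegion(ctx, nil)
	if err != nil {
		return "", err
	}
	return out.Region, nil
}
```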
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
new file mode 100644
index 000000000..1f9ee97a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
@@ -0,0 +1,119 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getTokenPath = "/latest/api/token"
+const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds"
+
+// getToken uses the duration to return a token for EC2 IMDS, or an error if
+// the request failed.
+func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) {
+ if params == nil {
+ params = &getTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns,
+ addGetTokenMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*getTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type getTokenInput struct {
+ TokenTTL time.Duration
+}
+
+type getTokenOutput struct {
+ Token string
+ TokenTTL time.Duration
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetTokenMiddleware(stack *middleware.Stack, options Options) error {
+ err := addRequestMiddleware(stack,
+ options,
+ "PUT",
+ "GetToken",
+ buildGetTokenPath,
+ buildGetTokenOutput)
+ if err != nil {
+ return err
+ }
+
+ err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func buildGetTokenPath(interface{}) (string, error) {
+ return getTokenPath, nil
+}
+
+func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+ defer func() {
+ closeErr := resp.Body.Close()
+ if err == nil {
+ err = closeErr
+ } else if closeErr != nil {
+ err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+ }
+ }()
+
+ ttlHeader := resp.Header.Get(tokenTTLHeader)
+ tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse API token, %w", err)
+ }
+
+ var token strings.Builder
+ if _, err = io.Copy(&token, resp.Body); err != nil {
+ return nil, fmt.Errorf("unable to read API token, %w", err)
+ }
+
+ return &getTokenOutput{
+ Token: token.String(),
+ TokenTTL: time.Duration(tokenTTL) * time.Second,
+ }, nil
+}
+
+type tokenTTLRequestHeader struct{}
+
+func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" }
+func (*tokenTTLRequestHeader) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+ }
+
+ input, ok := in.Parameters.(*getTokenInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+ }
+
+ req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
new file mode 100644
index 000000000..890369724
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -0,0 +1,61 @@
+package imds
+
+import (
+ "context"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData requests the user data configured for the instance from the
+// EC2 instance metadata service. The content is returned as an
+// io.ReadCloser in the output, or an error if the request fails.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+ if params == nil {
+ params = &GetUserDataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+ addGetUserDataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetUserDataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ "GetUserData",
+ buildGetUserDataPath,
+ buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+ return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetUserDataOutput{
+ Content: resp.Body,
+ }, nil
+}
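Reviewer note: GetUserData takes no path; a sketch of draining the body, with `client` assumed as before:

```go
package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// userData returns the raw user data configured for the instance, if any.
func userData(ctx context.Context, client *imds.Client) ([]byte, error) {
	out, err := client.GetUserData(ctx, nil)
	if err != nil {
		return nil, err
	}
	defer out.Content.Close()
	return io.ReadAll(out.Content)
}
```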
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
new file mode 100644
index 000000000..ad283cf82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
@@ -0,0 +1,48 @@
+package imds
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
new file mode 100644
index 000000000..d5765c36b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -0,0 +1,12 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// All Client operation calls have a default timeout. If the operation is not
+// completed before this timeout expires, the operation will be canceled. This
+// timeout can be overridden through the following:
+// - Set the options flag DisableDefaultTimeout
+// - Provide a Context with a timeout or deadline when calling the client's operations.
+//
+// See the EC2 IMDS user guide for more information on using the API.
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
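Reviewer note: the second override path mentioned above works because the default operation timeout is only applied when the incoming Context has no deadline. A minimal sketch, with `client` assumed as before:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// withDeadline overrides the default 5s operation timeout: when the Context
// already carries a deadline, the client's default timeout is not applied.
func withDeadline(client *imds.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	_, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"})
	return err
}
```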
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
new file mode 100644
index 000000000..d7540da34
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
@@ -0,0 +1,20 @@
+package imds
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
new file mode 100644
index 000000000..1af019aec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package imds
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.18.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
new file mode 100644
index 000000000..ce7745589
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
@@ -0,0 +1,114 @@
+package config
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ClientEnableState provides an enumeration of whether the client is
+// enabled, disabled, or uses the default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+ ClientDefaultEnableState ClientEnableState = iota
+ ClientDisabled
+ ClientEnabled
+)
+
+// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode
+type EndpointModeState uint
+
+// Enumeration values for EndpointModeState
+const (
+ EndpointModeStateUnset EndpointModeState = iota
+ EndpointModeStateIPv4
+ EndpointModeStateIPv6
+)
+
+// SetFromString sets the EndpointModeState based on the provided string value. An empty value sets EndpointModeStateUnset; unknown values return an error.
+func (e *EndpointModeState) SetFromString(v string) error {
+ v = strings.TrimSpace(v)
+
+ switch {
+ case len(v) == 0:
+ *e = EndpointModeStateUnset
+ case strings.EqualFold(v, "IPv6"):
+ *e = EndpointModeStateIPv6
+ case strings.EqualFold(v, "IPv4"):
+ *e = EndpointModeStateIPv4
+ default:
+ return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+ }
+ return nil
+}
+
+// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled.
+type ClientEnableStateResolver interface {
+ GetEC2IMDSClientEnableState() (ClientEnableState, bool, error)
+}
+
+// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration.
+type EndpointModeResolver interface {
+ GetEC2IMDSEndpointMode() (EndpointModeState, bool, error)
+}
+
+// EndpointResolver is a config resolver interface for retrieving the endpoint.
+type EndpointResolver interface {
+ GetEC2IMDSEndpoint() (string, bool, error)
+}
+
+type v1FallbackDisabledResolver interface {
+ GetEC2IMDSV1FallbackDisabled() (bool, bool)
+}
+
+// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
+func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
+ for _, source := range sources {
+ if resolver, ok := source.(ClientEnableStateResolver); ok {
+ value, found, err = resolver.GetEC2IMDSClientEnableState()
+ if err != nil || found {
+ return value, found, err
+ }
+ }
+ }
+ return value, found, err
+}
+
+// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources.
+func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) {
+ for _, source := range sources {
+ if resolver, ok := source.(EndpointModeResolver); ok {
+ value, found, err = resolver.GetEC2IMDSEndpointMode()
+ if err != nil || found {
+ return value, found, err
+ }
+ }
+ }
+ return value, found, err
+}
+
+// ResolveEndpointConfig resolves the endpoint from a list of configuration sources.
+func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) {
+ for _, source := range sources {
+ if resolver, ok := source.(EndpointResolver); ok {
+ value, found, err = resolver.GetEC2IMDSEndpoint()
+ if err != nil || found {
+ return value, found, err
+ }
+ }
+ }
+ return value, found, err
+}
+
+// ResolveV1FallbackDisabled resolves whether IMDSv1 fallback has been disabled from a list of configuration sources.
+func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) {
+ for _, source := range sources {
+ if resolver, ok := source.(v1FallbackDisabledResolver); ok {
+ if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found {
+ return v, true
+ }
+ }
+ }
+ return false, false
+}
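Reviewer note: a sketch of how these resolver interfaces compose. `envConfig` is a hypothetical source defined here only to illustrate the resolution order (the real sources live in the aws-sdk-go-v2/config module, and this internal package cannot be imported from outside the SDK):

```go
package config

// envConfig is a hypothetical config source implementing EndpointResolver.
type envConfig struct {
	endpoint string
}

func (e envConfig) GetEC2IMDSEndpoint() (string, bool, error) {
	return e.endpoint, e.endpoint != "", nil
}

// exampleResolve shows that sources are consulted in order; the first source
// that reports found (or returns an error) wins.
func exampleResolve() (string, bool, error) {
	sources := []interface{}{
		envConfig{},                                   // not found, skipped
		envConfig{endpoint: "http://[fd00:ec2::254]"}, // found, returned
	}
	return ResolveEndpointConfig(sources)
}
```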
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
new file mode 100644
index 000000000..90cf4aeb3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -0,0 +1,313 @@
+package imds
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "path"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func addAPIRequestMiddleware(stack *middleware.Stack,
+ options Options,
+ operation string,
+ getPath func(interface{}) (string, error),
+ getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+ err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput)
+ if err != nil {
+ return err
+ }
+
+ // Token Serializer build and state management.
+ if !options.disableAPIToken {
+ err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After)
+ if err != nil {
+ return err
+ }
+
+ err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func addRequestMiddleware(stack *middleware.Stack,
+ options Options,
+ method string,
+ operation string,
+ getPath func(interface{}) (string, error),
+ getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+ err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack)
+ if err != nil {
+ return err
+ }
+
+ // Operation timeout
+ err = stack.Initialize.Add(&operationTimeout{
+ Disabled: options.DisableDefaultTimeout,
+ DefaultTimeout: defaultOperationTimeout,
+ }, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // Operation Serializer
+ err = stack.Serialize.Add(&serializeRequest{
+ GetPath: getPath,
+ Method: method,
+ }, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // Operation endpoint resolver
+ err = stack.Serialize.Insert(&resolveEndpoint{
+ Endpoint: options.Endpoint,
+ EndpointMode: options.EndpointMode,
+ }, "OperationSerializer", middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // Operation Deserializer
+ err = stack.Deserialize.Add(&deserializeResponse{
+ GetOutput: getOutput,
+ }, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: options.ClientLogMode.IsRequest(),
+ LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(),
+ LogResponse: options.ClientLogMode.IsResponse(),
+ LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ err = addSetLoggerMiddleware(stack, options)
+ if err != nil {
+ return err
+ }
+
+ if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil {
+ return fmt.Errorf("add protocol finalizers: %w", err)
+ }
+
+ // Retry support
+ return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
+ Retryer: options.Retryer,
+ LogRetryAttempts: options.ClientLogMode.IsRetries(),
+ })
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+type serializeRequest struct {
+ GetPath func(interface{}) (string, error)
+ Method string
+}
+
+func (*serializeRequest) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *serializeRequest) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ reqPath, err := m.GetPath(in.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to get request URL path, %w", err)
+ }
+
+ request.Request.URL.Path = reqPath
+ request.Request.Method = m.Method
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type deserializeResponse struct {
+ GetOutput func(*smithyhttp.Response) (interface{}, error)
+}
+
+func (*deserializeResponse) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *deserializeResponse) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, fmt.Errorf(
+ "unexpected transport response type, %T, want %T", out.RawResponse, resp)
+ }
+ defer resp.Body.Close()
+
+ // Read the full body so that the operation timeout cleanup does not race
+ // with the body still being read.
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return out, metadata, fmt.Errorf("read response body failed, %w", err)
+ }
+ resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+ // Any status code outside the range [200, 300) is an error
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return out, metadata, &smithyhttp.ResponseError{
+ Response: resp,
+ Err: fmt.Errorf("request to EC2 IMDS failed"),
+ }
+ }
+
+ result, err := m.GetOutput(resp)
+ if err != nil {
+ return out, metadata, fmt.Errorf(
+ "unable to get deserialized result for response, %w", err,
+ )
+ }
+ out.Result = result
+
+ return out, metadata, err
+}
+
+type resolveEndpoint struct {
+ Endpoint string
+ EndpointMode EndpointModeState
+}
+
+func (*resolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ var endpoint string
+ if len(m.Endpoint) > 0 {
+ endpoint = m.Endpoint
+ } else {
+ switch m.EndpointMode {
+ case EndpointModeStateIPv6:
+ endpoint = defaultIPv6Endpoint
+ case EndpointModeStateIPv4:
+ fallthrough
+ case EndpointModeStateUnset:
+ endpoint = defaultIPv4Endpoint
+ default:
+ return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode")
+ }
+ }
+
+ req.URL, err = url.Parse(endpoint)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+const (
+ defaultOperationTimeout = 5 * time.Second
+)
+
+// operationTimeout adds a timeout on the middleware stack if the Context the
+// stack was called with does not have a deadline. The next middleware must
+// complete before the timeout, or the context will be canceled.
+//
+// If DefaultTimeout is zero, no default timeout will be used if the Context
+// does not have a timeout.
+//
+// The next middleware must also ensure that any resources canceled by the
+// stack's context are completely consumed before returning. Otherwise the
+// timeout cleanup will race with the resource still being consumed
+// upstream.
+type operationTimeout struct {
+ Disabled bool
+ DefaultTimeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+ ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.Disabled {
+ return next.HandleInitialize(ctx, input)
+ }
+
+ if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
+ var cancelFn func()
+ ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
+ defer cancelFn()
+ }
+
+ return next.HandleInitialize(ctx, input)
+}
+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/` that slash will be maintained.
+func appendURIPath(base, add string) string {
+ reqPath := path.Join(base, add)
+ if len(add) != 0 && add[len(add)-1] == '/' {
+ reqPath += "/"
+ }
+ return reqPath
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
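Reviewer note: the trailing-slash handling in appendURIPath is subtle, since path.Join strips a trailing slash that the helper then restores. A mirror of the unexported helper for illustration, with its expected behavior spelled out:

```go
package example

import "path"

// appendURIPath mirrors the unexported helper above: path.Join normalizes
// separators but strips a trailing slash, which the helper re-appends so the
// request preserves the caller's intent.
func appendURIPath(base, add string) string {
	reqPath := path.Join(base, add)
	if len(add) != 0 && add[len(add)-1] == '/' {
		reqPath += "/"
	}
	return reqPath
}

// Expected behavior, for illustration:
//   appendURIPath("/latest/meta-data", "instance-id")   == "/latest/meta-data/instance-id"
//   appendURIPath("/latest/meta-data", "/instance-id/") == "/latest/meta-data/instance-id/"
//   appendURIPath("/latest/meta-data", "")              == "/latest/meta-data"
```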
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
new file mode 100644
index 000000000..5703c6e16
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -0,0 +1,261 @@
+package imds
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "net/http"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+ // Headers for Token and TTL
+ tokenHeader = "x-aws-ec2-metadata-token"
+ defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+ client *Client
+ tokenTTL time.Duration
+
+ token *apiToken
+ tokenMux sync.RWMutex
+
+ disabled uint32 // Atomic updated
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+ return &tokenProvider{
+ client: client,
+ tokenTTL: ttl,
+ }
+}
+
+// apiToken provides the API token used by all operation calls for the EC2
+// Instance Metadata Service.
+type apiToken struct {
+ token string
+ expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired returns if the token is expired.
+func (t *apiToken) Expired() bool {
+ // Calling Round(0) on the current time will truncate the monotonic reading only. Ensures credential expiry
+ // time is always based on reported wall-clock time.
+ return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware that, if the token provider
+// is enabled, will attempt to add the cached API token to the request. If the
+// API token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, handler must be added after attempt retryer.
+//
+// If request for getToken fails the token provider may be disabled from future
+// requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+ ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if t.fallbackEnabled() && !t.enabled() {
+ // short-circuits to insecure data flow if token provider is disabled.
+ return next.HandleFinalize(ctx, input)
+ }
+
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+ }
+
+ tok, err := t.getToken(ctx)
+ if err != nil {
+ // If the error allows the token to downgrade to insecure flow allow that.
+ var bypassErr *bypassTokenRetrievalError
+ if errors.As(err, &bypassErr) {
+ return next.HandleFinalize(ctx, input)
+ }
+
+ return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+ }
+
+ req.Header.Set(tokenHeader, tok.token)
+
+ return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if the
+// operation the token provider is decorating failed because of a 401
+// unauthorized status code. If the operation failed for that reason the token
+// provider needs to be re-enabled so that it can start adding the API token to
+// operation calls.
+func (t *tokenProvider) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, input)
+ if err == nil {
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+ }
+
+ if resp.StatusCode == http.StatusUnauthorized { // unauthorized
+ t.enable()
+ err = &retryableError{Err: err, isRetryable: true}
+ }
+
+ return out, metadata, err
+}
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+ if t.fallbackEnabled() && !t.enabled() {
+ return nil, &bypassTokenRetrievalError{
+ Err: fmt.Errorf("cannot get API token, provider disabled"),
+ }
+ }
+
+ t.tokenMux.RLock()
+ tok = t.token
+ t.tokenMux.RUnlock()
+
+ if tok != nil && !tok.Expired() {
+ return tok, nil
+ }
+
+ tok, err = t.updateToken(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+ t.tokenMux.Lock()
+ defer t.tokenMux.Unlock()
+
+ // Prevent multiple concurrent requests from each retrieving a new token.
+ if t.token != nil && !t.token.Expired() {
+ tok := t.token
+ return tok, nil
+ }
+
+ result, err := t.client.getToken(ctx, &getTokenInput{
+ TokenTTL: t.tokenTTL,
+ })
+ if err != nil {
+ var statusErr interface{ HTTPStatusCode() int }
+ if errors.As(err, &statusErr) {
+ switch statusErr.HTTPStatusCode() {
+ // Disable future get token if failed because of 403, 404, or 405
+ case http.StatusForbidden,
+ http.StatusNotFound,
+ http.StatusMethodNotAllowed:
+
+ if t.fallbackEnabled() {
+ logger := middleware.GetLogger(ctx)
+ logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+ t.disable()
+ }
+
+ // 400 errors are terminal and must be surfaced to the caller
+ case http.StatusBadRequest:
+ return nil, err
+ }
+ }
+
+ // Disable if request send failed or timed out getting response
+ var re *smithyhttp.RequestSendError
+ var ce *smithy.CanceledError
+ if errors.As(err, &re) || errors.As(err, &ce) {
+ atomic.StoreUint32(&t.disabled, 1)
+ }
+
+ if !t.fallbackEnabled() {
+ // NOTE: getToken() is an implementation detail of some outer operation
+ // (e.g. GetMetadata). It has its own retries that have already been exhausted.
+ // Mark the underlying error as a terminal error.
+ err = &retryableError{Err: err, isRetryable: false}
+ return nil, err
+ }
+
+ // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
+ // and allow the request to proceed. Future requests _may_ re-attempt fetching a
+ // token if not disabled.
+ return nil, &bypassTokenRetrievalError{Err: err}
+ }
+
+ tok := &apiToken{
+ token: result.Token,
+ expires: timeNow().Add(result.TokenTTL),
+ }
+ t.token = tok
+
+ return tok, nil
+}
+
+// enabled returns whether the token provider is currently enabled.
+func (t *tokenProvider) enabled() bool {
+ return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
+func (t *tokenProvider) fallbackEnabled() bool {
+ switch t.client.options.EnableFallback {
+ case aws.FalseTernary:
+ return false
+ default:
+ return true
+ }
+}
+
+// disable disables the token provider so that it will no longer attempt to
+// inject the token, nor request updates.
+func (t *tokenProvider) disable() {
+ atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provider to start refreshing tokens, and adding
+// them to pending requests.
+func (t *tokenProvider) enable() {
+ t.tokenMux.Lock()
+ t.token = nil
+ t.tokenMux.Unlock()
+ atomic.StoreUint32(&t.disabled, 0)
+}
+
+type bypassTokenRetrievalError struct {
+ Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+ return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+ Err error
+ isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
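Reviewer note: the fallback behavior above can be turned off by callers that require IMDSv2; a minimal sketch using the exported EnableFallback option:

```go
package example

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

// strictV2Client builds a client that never falls back to IMDSv1: token
// retrieval errors are surfaced instead of silently downgrading to the
// insecure data flow.
func strictV2Client(cfg aws.Config) *imds.Client {
	return imds.NewFromConfig(cfg, func(o *imds.Options) {
		o.EnableFallback = aws.FalseTernary
	})
}
```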
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
new file mode 100644
index 000000000..57fa1deeb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
@@ -0,0 +1,1013 @@
+# v1.17.69 (2025-03-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.68 (2025-03-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.67 (2025-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.66 (2025-03-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.65 (2025-03-04.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.64 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.63 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.62 (2025-02-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.61 (2025-02-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.60 (2025-02-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.59 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.58 (2025-02-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.57 (2025-01-31)
+
+* **Bug Fix**: Fix incorrect reference to old s3manager in comments.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.56 (2025-01-30)
+
+* **Bug Fix**: Fix incorrect reference to old s3manager in comments.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.55 (2025-01-29)
+
+* **Bug Fix**: Fix incorrect reference to old s3manager in comments.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.54 (2025-01-24)
+
+* **Bug Fix**: Fix incorrect reference to old s3manager in comments.
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.17.53 (2025-01-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.52 (2025-01-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.51 (2025-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.50 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.49 (2025-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.48 (2025-01-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.47 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.46 (2025-01-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.45 (2025-01-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.44 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.43 (2024-12-03.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.42 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.41 (2024-11-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.40 (2024-11-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.39 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.38 (2024-11-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.37 (2024-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.36 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.35 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.34 (2024-10-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.33 (2024-10-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.32 (2024-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.31 (2024-10-09)
+
+* **Bug Fix**: Fixup some integration tests.
+
+# v1.17.30 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.29 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.28 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.27 (2024-10-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.26 (2024-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.25 (2024-09-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.24 (2024-09-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.23 (2024-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.22 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.21 (2024-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.20 (2024-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.19 (2024-09-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.18 (2024-09-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.16 (2024-08-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2024-08-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2024-08-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.13 (2024-08-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2024-08-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2024-08-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2024-07-24)
+
+* **Documentation**: Clarify region hint and credential usage in HeadBucketRegion.
+
+# v1.17.8 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2024-07-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.25 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.24 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.23 (2024-06-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.22 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.21 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.20 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.19 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.18 (2024-05-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.17 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2024-03-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2024-03-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2024-02-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2024-02-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2024-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2024-01-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2024-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2023-12-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2023-12-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-11-28.3)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-11-28.2)
+
+* **Feature**: Add S3Express support.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.3 (2023-11-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-11-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.1 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-11-17)
+
+* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2023-11-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2023-11-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2023-11-09.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-11-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.92 (2023-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.91 (2023-10-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.90 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.89 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.88 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.87 (2023-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.86 (2023-09-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.85 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.84 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.83 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.82 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.81 (2023-08-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.80 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.79 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.78 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.77 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.76 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.75 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.74 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.73 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.72 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.71 (2023-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.70 (2023-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.69 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.68 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.67 (2023-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.66 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.65 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.64 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.63 (2023-04-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.62 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.61 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.60 (2023-03-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.59 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.58 (2023-03-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.57 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.56 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.55 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.54 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.53 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.52 (2023-02-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.51 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.50 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.49 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.48 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.47 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.46 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.45 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.44 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.43 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.42 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.41 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.40 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.39 (2022-11-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.38 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.37 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.36 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.35 (2022-10-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.34 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.33 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.32 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.31 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.30 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.29 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.28 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.27 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-08-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-08-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.23 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.22 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.21 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.18 (2022-07-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.16 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.11 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2022-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-05-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2022-05-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2022-04-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2022-01-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.4 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.3 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.2 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2021-06-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2021-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go
new file mode 100644
index 000000000..4059f9851
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/api.go
@@ -0,0 +1,37 @@
+package manager
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+)
+
+// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation.
+type DeleteObjectsAPIClient interface {
+ DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
+}
+
+// DownloadAPIClient is an S3 API client that can invoke the GetObject operation.
+type DownloadAPIClient interface {
+ GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error)
+}
+
+// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation.
+type HeadBucketAPIClient interface {
+ HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error)
+}
+
+// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectsV2 operation.
+type ListObjectsV2APIClient interface {
+ ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
+}
+
+// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload,
+// CompleteMultipartUpload, and AbortMultipartUpload operations.
+type UploadAPIClient interface {
+ PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error)
+ UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error)
+ CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
+ CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
+ AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
+}
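+
+// As an illustrative sketch (not part of the upstream file), the *s3.Client
+// from github.com/aws/aws-sdk-go-v2/service/s3 is expected to satisfy each of
+// the interfaces above, which could be verified with compile-time assertions:
+//
+//	var (
+//		_ DeleteObjectsAPIClient = (*s3.Client)(nil)
+//		_ DownloadAPIClient      = (*s3.Client)(nil)
+//		_ HeadBucketAPIClient    = (*s3.Client)(nil)
+//		_ ListObjectsV2APIClient = (*s3.Client)(nil)
+//		_ UploadAPIClient        = (*s3.Client)(nil)
+//	)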
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go
new file mode 100644
index 000000000..d3b828979
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/arn.go
@@ -0,0 +1,23 @@
+package manager
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+func validateSupportedARNType(bucket string) error {
+ if !arn.IsARN(bucket) {
+ return nil
+ }
+
+ parsedARN, err := arn.Parse(bucket)
+ if err != nil {
+ return err
+ }
+
+ if parsedARN.Service == "s3-object-lambda" {
+ return fmt.Errorf("manager does not support s3-object-lambda service ARNs")
+ }
+
+ return nil
+}
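+
+// For illustration (bucket values assumed, not from upstream): a plain bucket
+// name such as "my-bucket" and an access point ARN such as
+// "arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap" both pass this check,
+// while an S3 Object Lambda ARN such as
+// "arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-olap"
+// returns an error.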
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
new file mode 100644
index 000000000..8c7019529
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go
@@ -0,0 +1,147 @@
+package manager
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const bucketRegionHeader = "X-Amz-Bucket-Region"
+
+// GetBucketRegion will attempt to get the region for a bucket using the
+// client's configured region to determine which AWS partition to perform the query on.
+//
+// A BucketNotFound error will be returned if the bucket does not exist in the
+// AWS partition the client region belongs to.
+//
+// For example, to get the region of a bucket which exists in "eu-central-1",
+// you could provide a region hint of "us-west-2".
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
+// if err != nil {
+// log.Println("error:", err)
+// return
+// }
+//
+// bucket := "my-bucket"
+// region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket)
+// if err != nil {
+// var bnf manager.BucketNotFound
+// if errors.As(err, &bnf) {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// By default the request will be made to the Amazon S3 endpoint using virtual-hosted-style addressing:
+//
+// bucketname.s3.us-west-2.amazonaws.com/
+//
+// To configure the GetBucketRegion to make a request via the Amazon
+// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
+// fips-us-gov-west-1), set the EndpointResolver on the config or client the
+// utility is called with.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO(),
+// config.WithEndpointResolver(
+// aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+// return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil
+// }),
+// )
+// if err != nil {
+// panic(err)
+// }
+//
+// If buckets are public, you may use anonymous credentials like so.
+//
+// manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket, func(o *s3.Options) {
+// o.Credentials = nil
+// // Or
+// o.Credentials = aws.AnonymousCredentials{}
+// })
+//
+// A request made with anonymous credentials will not be signed;
+// credentials are otherwise required for private buckets.
+func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) {
+ var captureBucketRegion deserializeBucketRegion
+
+ clientOptionFns := make([]func(*s3.Options), len(optFns)+1)
+ clientOptionFns[0] = func(options *s3.Options) {
+ options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware)
+ }
+ copy(clientOptionFns[1:], optFns)
+
+ _, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
+ Bucket: aws.String(bucket),
+ }, clientOptionFns...)
+ if len(captureBucketRegion.BucketRegion) == 0 && err != nil {
+ var httpStatusErr interface {
+ HTTPStatusCode() int
+ }
+ if !errors.As(err, &httpStatusErr) {
+ return "", err
+ }
+
+ if httpStatusErr.HTTPStatusCode() == http.StatusNotFound {
+ return "", &bucketNotFound{}
+ }
+
+ return "", err
+ }
+
+ return captureBucketRegion.BucketRegion, nil
+}
+
+type deserializeBucketRegion struct {
+ BucketRegion string
+}
+
+func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(d, middleware.After)
+}
+
+func (d *deserializeBucketRegion) ID() string {
+ return "DeserializeBucketRegion"
+}
+
+func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
+ }
+
+ d.BucketRegion = resp.Header.Get(bucketRegionHeader)
+
+ return out, metadata, err
+}
+
+// BucketNotFound indicates the bucket was not found in the partition when calling GetBucketRegion.
+type BucketNotFound interface {
+ error
+
+ isBucketNotFound()
+}
+
+type bucketNotFound struct{}
+
+func (b *bucketNotFound) Error() string {
+ return "bucket not found"
+}
+
+func (b *bucketNotFound) isBucketNotFound() {}
+
+var _ BucketNotFound = (*bucketNotFound)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
new file mode 100644
index 000000000..e781aef61
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/buffered_read_seeker.go
@@ -0,0 +1,79 @@
+package manager
+
+import (
+ "io"
+)
+
+// BufferedReadSeeker is a buffered io.ReadSeeker.
+type BufferedReadSeeker struct {
+ r io.ReadSeeker
+ buffer []byte
+ readIdx, writeIdx int
+}
+
+// NewBufferedReadSeeker returns a new BufferedReadSeeker.
+// If len(b) == 0, the buffer is initialized to 64 KiB.
+func NewBufferedReadSeeker(r io.ReadSeeker, b []byte) *BufferedReadSeeker {
+ if len(b) == 0 {
+ b = make([]byte, 64*1024)
+ }
+ return &BufferedReadSeeker{r: r, buffer: b}
+}
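+
+// A minimal usage sketch (illustrative, not from upstream): wrap any
+// io.ReadSeeker, optionally providing your own buffer to control its size.
+//
+//	rs := strings.NewReader("example payload")
+//	brs := NewBufferedReadSeeker(rs, make([]byte, 32*1024)) // 32 KiB buffer
+//	p := make([]byte, 8)
+//	n, err := brs.Read(p) // fills the buffer once, then copies 8 bytes into p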
+
+func (b *BufferedReadSeeker) reset(r io.ReadSeeker) {
+ b.r = r
+ b.readIdx, b.writeIdx = 0, 0
+}
+
+// Read will read up to len(p) bytes into p and will return
+// the number of bytes read and any error that occurred.
+// If len(p) is greater than the buffer size, a single read request
+// will be issued to the underlying io.ReadSeeker for len(p) bytes.
+// A Read request will perform at most a single Read on the underlying
+// io.ReadSeeker, and may return < len(p) if serviced from the buffer.
+func (b *BufferedReadSeeker) Read(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return n, err
+ }
+
+ if b.readIdx == b.writeIdx {
+ if len(p) >= len(b.buffer) {
+ n, err = b.r.Read(p)
+ return n, err
+ }
+ b.readIdx, b.writeIdx = 0, 0
+
+ n, err = b.r.Read(b.buffer)
+ if n == 0 {
+ return n, err
+ }
+
+ b.writeIdx += n
+ }
+
+ n = copy(p, b.buffer[b.readIdx:b.writeIdx])
+ b.readIdx += n
+
+ return n, err
+}
+
+// Seek will position the underlying io.ReadSeeker to the given offset
+// and will clear the buffer.
+func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
+ n, err := b.r.Seek(offset, whence)
+
+ b.reset(b.r)
+
+ return n, err
+}
+
+// ReadAt will read up to len(p) bytes at the given file offset.
+// This will result in the buffer being cleared.
+func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
+ _, err := b.Seek(off, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return b.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go
new file mode 100644
index 000000000..e2ab143b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go
new file mode 100644
index 000000000..1ae881c10
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_read_seeker_write_to_windows.go
@@ -0,0 +1,5 @@
+package manager
+
+func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
+ return NewBufferedReadSeekerWriteToPool(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go
new file mode 100644
index 000000000..179fe10f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go
new file mode 100644
index 000000000..88887ff58
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/default_writer_read_from_windows.go
@@ -0,0 +1,5 @@
+package manager
+
+func defaultDownloadBufferProvider() WriterReadFromProvider {
+ return NewPooledBufferedWriterReadFromProvider(1024 * 1024)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go
new file mode 100644
index 000000000..31171a698
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/doc.go
@@ -0,0 +1,3 @@
+// Package manager provides utilities to upload and download objects from
+// S3 concurrently. Helpful when working with large objects.
+package manager
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go
new file mode 100644
index 000000000..399436447
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go
@@ -0,0 +1,528 @@
+package manager
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/smithy-go/logging"
+)
+
+const userAgentKey = "s3-transfer"
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
+const DefaultPartBodyMaxRetries = 3
+
+type errReadingBody struct {
+ err error
+}
+
+func (e *errReadingBody) Error() string {
+ return fmt.Sprintf("failed to read part body: %v", e.err)
+}
+
+func (e *errReadingBody) Unwrap() error {
+ return e.err
+}
+
+// Downloader is the structure used to call Download(). It is safe to call
+// Download() on this structure for multiple objects and across concurrent
+// goroutines. Mutating the Downloader's properties is not safe to do
+// concurrently.
+type Downloader struct {
+ // The size (in bytes) to request from S3 for each part.
+ // The minimum allowed part size is 5MB, and if this value is set to zero,
+ // the DefaultDownloadPartSize value will be used.
+ //
+ // PartSize is ignored if the Range input parameter is provided.
+ PartSize int64
+
+ // PartBodyMaxRetries is the number of retry attempts to make for failed part downloads.
+ PartBodyMaxRetries int
+
+ // Logger to send logging messages to
+ Logger logging.Logger
+
+ // Enable Logging of part download retry attempts
+ LogInterruptedDownloads bool
+
+ // The number of goroutines to spin up in parallel when sending parts.
+ // If this is set to zero, the DefaultDownloadConcurrency value will be used.
+ //
+ // Concurrency of 1 will download the parts sequentially.
+ //
+ // Concurrency is ignored if the Range input parameter is provided.
+ Concurrency int
+
+ // An S3 client to use when performing downloads.
+ S3 DownloadAPIClient
+
+ // List of client options that will be passed down to individual API
+ // operation requests made by the downloader.
+ ClientOptions []func(*s3.Options)
+
+ // Defines the buffer strategy used when downloading a part.
+ //
+ // If a WriterReadFromProvider is given the Download manager
+ // will pass the io.WriterAt of the Download request to the provider
+ // and will use the returned WriterReadFrom from the provider as the
+	// destination writer when copying from the HTTP response body.
+ BufferProvider WriterReadFromProvider
+}
+
+// WithDownloaderClientOptions appends to the Downloader's API request options.
+func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) {
+ return func(d *Downloader) {
+ d.ClientOptions = append(d.ClientOptions, opts...)
+ }
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a DownloadAPIClient, such as the
+// s3.Client, to make the S3 API calls.
+//
+// Example:
+//
+// // Load AWS Config
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create an S3 client using the loaded configuration
+// s3.NewFromConfig(cfg)
+//
+// // Create a downloader passing it the S3 client
+// downloader := manager.NewDownloader(s3.NewFromConfig(cfg))
+//
+// // Create a downloader with the client and custom downloader options
+// downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
+// d.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader {
+ d := &Downloader{
+ S3: c,
+ PartSize: DefaultDownloadPartSize,
+ PartBodyMaxRetries: DefaultPartBodyMaxRetries,
+ Concurrency: DefaultDownloadConcurrency,
+ BufferProvider: defaultDownloadBufferProvider(),
+ }
+ for _, option := range options {
+ option(d)
+ }
+
+ return d
+}
+
+// Download downloads an object in S3 and writes the payload into w
+// using concurrent GET requests. The n int64 returned is the size of the
+// object downloaded in bytes.
+//
+// The Context must not be nil. A nil Context will cause a panic. Use the
+// Context to add deadlines, timeouts, etc. Download may create sub-contexts
+// for individual underlying requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance. Use the WithDownloaderClientOptions helper function to pass in request
+// options that will be applied to all API operations made with this downloader.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or by an in-memory []byte wrapper created with
+// manager.NewWriteAtBuffer. When downloading files into memory, pre-allocate
+// the buffer to avoid additional allocations and GC runs.
+//
+// Example:
+//
+// // pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput
+// buf := make([]byte, int(headObject.ContentLength))
+// // wrap with aws.WriteAtBuffer
+// w := manager.NewWriteAtBuffer(buf)
+// // download file into the memory
+// numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{
+// Bucket: aws.String(bucket),
+// Key: aws.String(item),
+// })
+//
+// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
+// download the parts from S3 sequentially.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// If the GetObjectInput's Range value is provided, the downloader will perform
+// a single GetObject request for that object's range, causing the part size
+// and concurrency configurations to be ignored.
+func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+ if err := validateSupportedARNType(aws.ToString(input.Bucket)); err != nil {
+ return 0, err
+ }
+
+ impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
+
+ // Copy ClientOptions
+ clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1)
+ clientOptions = append(clientOptions, func(o *s3.Options) {
+ o.APIOptions = append(o.APIOptions,
+ middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
+ addFeatureUserAgent, // yes, there are two of these
+ )
+ })
+ clientOptions = append(clientOptions, impl.cfg.ClientOptions...)
+ impl.cfg.ClientOptions = clientOptions
+
+ for _, option := range options {
+ option(&impl.cfg)
+ }
+
+ // Ensures we don't need nil checks later on
+ impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger)
+
+ impl.partBodyMaxRetries = d.PartBodyMaxRetries
+
+ impl.totalBytes = -1
+ if impl.cfg.Concurrency == 0 {
+ impl.cfg.Concurrency = DefaultDownloadConcurrency
+ }
+
+ if impl.cfg.PartSize == 0 {
+ impl.cfg.PartSize = DefaultDownloadPartSize
+ }
+
+ return impl.download()
+}
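+
+// A complementary sketch (illustrative, not from upstream): downloading
+// directly to disk, since *os.File already satisfies io.WriterAt.
+//
+//	f, err := os.Create("object.bin")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	n, err := downloader.Download(ctx, f, &s3.GetObjectInput{
+//		Bucket: aws.String(bucket),
+//		Key:    aws.String(key),
+//	})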
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+ ctx context.Context
+ cfg Downloader
+
+ in *s3.GetObjectInput
+ w io.WriterAt
+
+ wg sync.WaitGroup
+ m sync.Mutex
+
+ pos int64
+ totalBytes int64
+ written int64
+ err error
+
+ partBodyMaxRetries int
+}
+
+// download performs the object download across ranged GETs.
+func (d *downloader) download() (n int64, err error) {
+	// If a range is specified, fall back to a single download of that range.
+	// This enables ranged gets with the downloader, at the cost of losing
+	// multipart downloads.
+ if rng := aws.ToString(d.in.Range); len(rng) > 0 {
+ d.downloadRange(rng)
+ return d.written, d.err
+ }
+
+ // Spin off first worker to check additional header information
+ d.getChunk()
+
+ if total := d.getTotalBytes(); total >= 0 {
+ // Spin up workers
+ ch := make(chan dlchunk, d.cfg.Concurrency)
+
+ for i := 0; i < d.cfg.Concurrency; i++ {
+ d.wg.Add(1)
+ go d.downloadPart(ch)
+ }
+
+ // Assign work
+ for d.getErr() == nil {
+ if d.pos >= total {
+ break // We're finished queuing chunks
+ }
+
+ // Queue the next range of bytes to read.
+ ch <- dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+ d.pos += d.cfg.PartSize
+ }
+
+ // Wait for completion
+ close(ch)
+ d.wg.Wait()
+ } else {
+ // Checking if we read anything new
+ for d.err == nil {
+ d.getChunk()
+ }
+
+ // We expect a 416 error letting us know we are done downloading the
+ // total bytes. Since we do not know the content's length, this will
+ // keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+ // 416 should occur.
+ var responseError interface {
+ HTTPStatusCode() int
+ }
+ if errors.As(d.err, &responseError) {
+ if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable {
+ d.err = nil
+ }
+ }
+ }
+
+ // Return error
+ return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+ defer d.wg.Done()
+ for {
+ chunk, ok := <-ch
+ if !ok {
+ break
+ }
+ if d.getErr() != nil {
+ // Drain the channel if there is an error, to prevent deadlocking
+ // of download producer.
+ continue
+ }
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+ }
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos, size: d.cfg.PartSize}
+ d.pos += d.cfg.PartSize
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+}
+
+// downloadRange downloads an object given the passed in Byte-Range value.
+// The chunk used to download the range will be configured for that range.
+func (d *downloader) downloadRange(rng string) {
+ if d.getErr() != nil {
+ return
+ }
+
+ chunk := dlchunk{w: d.w, start: d.pos}
+ // Ranges specified will short circuit the multipart download
+ chunk.withRange = rng
+
+ if err := d.downloadChunk(chunk); err != nil {
+ d.setErr(err)
+ }
+
+ // Update the position based on the amount of data received.
+ d.pos = d.written
+}
+
+// downloadChunk downloads the chunk from s3
+func (d *downloader) downloadChunk(chunk dlchunk) error {
+ var params s3.GetObjectInput
+	awsutil.Copy(&params, d.in)
+
+ // Get the next byte range of data
+ params.Range = aws.String(chunk.ByteRange())
+
+ var n int64
+ var err error
+ for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+		n, err = d.tryDownloadChunk(&params, &chunk)
+ if err == nil {
+ break
+ }
+ // Check if the returned error is an errReadingBody.
+ // If err is errReadingBody this indicates that an error
+ // occurred while copying the http response body.
+ // If this occurs we unwrap the err to set the underlying error
+ // and attempt any remaining retries.
+ if bodyErr, ok := err.(*errReadingBody); ok {
+ err = bodyErr.Unwrap()
+ } else {
+ return err
+ }
+
+ chunk.cur = 0
+
+ d.cfg.Logger.Logf(logging.Debug,
+ "object part body download interrupted %s, err, %v, retrying attempt %d",
+ aws.ToString(params.Key), err, retry)
+ }
+
+ d.incrWritten(n)
+
+ return err
+}
+
+func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) {
+ cleanup := func() {}
+ if d.cfg.BufferProvider != nil {
+ w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
+ }
+ defer cleanup()
+
+ resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...)
+ if err != nil {
+ return 0, err
+ }
+ d.setTotalBytes(resp) // Set total if not yet set.
+
+ var src io.Reader = resp.Body
+ if d.cfg.BufferProvider != nil {
+ src = &suppressWriterAt{suppressed: src}
+ }
+ n, err := io.Copy(w, src)
+ resp.Body.Close()
+ if err != nil {
+ return n, &errReadingBody{err: err}
+ }
+
+ return n, nil
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// Will extract the object's total bytes from the Content-Range if the file
+// will be chunked, or Content-Length. Content-Length is used when the response
+// does not include a Content-Range. Meaning the object was not chunked. This
+// occurs when the full file fits within the PartSize directive.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ if d.totalBytes >= 0 {
+ return
+ }
+
+ if resp.ContentRange == nil {
+		// ContentRange is nil when the full file contents are provided and
+		// not chunked. Use ContentLength instead.
+ if aws.ToInt64(resp.ContentLength) > 0 {
+ d.totalBytes = aws.ToInt64(resp.ContentLength)
+ return
+ }
+ } else {
+ parts := strings.Split(*resp.ContentRange, "/")
+
+ total := int64(-1)
+ var err error
+		// Check whether a numeric total exists. If it does not, assume the
+		// total is -1 (undefined) and sequentially download each chunk until
+		// hitting a 416 error.
+ totalStr := parts[len(parts)-1]
+ if totalStr != "*" {
+ total, err = strconv.ParseInt(totalStr, 10, 64)
+ if err != nil {
+ d.err = err
+ return
+ }
+ }
+
+ d.totalBytes = total
+ }
+}
+
+func (d *downloader) incrWritten(n int64) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.written += n
+}
+
+// getErr is a thread-safe getter for the error object
+func (d *downloader) getErr() error {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ return d.err
+}
+
+// setErr is a thread-safe setter for the error object
+func (d *downloader) setErr(e error) {
+ d.m.Lock()
+ defer d.m.Unlock()
+
+ d.err = e
+}
+
+// dlchunk represents a single chunk of data to write by the worker routine.
+// This structure also implements an io.SectionReader style interface for
+// io.WriterAt, effectively making it an io.SectionWriter (which does not
+// exist).
+type dlchunk struct {
+ w io.WriterAt
+ start int64
+ size int64
+ cur int64
+
+ // specifies the byte range the chunk should be downloaded with.
+ withRange string
+}
+
+// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start
+// position to its end (or EOF).
+//
+// If a range is specified on the dlchunk, the size will be ignored when
+// writing, as the total size may not be known ahead of time.
+func (c *dlchunk) Write(p []byte) (n int, err error) {
+ if c.cur >= c.size && len(c.withRange) == 0 {
+ return 0, io.EOF
+ }
+
+ n, err = c.w.WriteAt(p, c.start+c.cur)
+ c.cur += int64(n)
+
+ return
+}
+
+// ByteRange returns an HTTP Byte-Range header value that should be used by the
+// client to request the chunk's range.
+func (c *dlchunk) ByteRange() string {
+ if len(c.withRange) != 0 {
+ return c.withRange
+ }
+
+ return fmt.Sprintf("bytes=%d-%d", c.start, c.start+c.size-1)
+}
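
For orientation, here is a small standalone sketch (not part of the vendored code; the object and part sizes are illustrative) of how dlchunk.ByteRange-style ranges tile an object. Each worker issues one ranged GetObject per such range.

package main

import "fmt"

func main() {
	const objectSize = int64(12 * 1024 * 1024) // assumed 12 MiB object
	const partSize = int64(5 * 1024 * 1024)    // matches a 5 MiB chunk size

	for start := int64(0); start < objectSize; start += partSize {
		end := start + partSize - 1
		if end > objectSize-1 {
			end = objectSize - 1
		}
		// Same format dlchunk.ByteRange produces: "bytes=<start>-<end>"
		fmt.Printf("Range: bytes=%d-%d\n", start, end)
	}
}
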
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
new file mode 100644
index 000000000..b30522a2c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package manager
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.17.69"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go
new file mode 100644
index 000000000..6b93a3bc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/pool.go
@@ -0,0 +1,251 @@
+package manager
+
+import (
+ "context"
+ "fmt"
+ "sync"
+)
+
+type byteSlicePool interface {
+ Get(context.Context) (*[]byte, error)
+ Put(*[]byte)
+ ModifyCapacity(int)
+ SliceSize() int64
+ Close()
+}
+
+type maxSlicePool struct {
+ // allocator is defined as a function pointer to allow
+ // for test cases to instrument custom tracers when allocations
+ // occur.
+ allocator sliceAllocator
+
+ slices chan *[]byte
+ allocations chan struct{}
+ capacityChange chan struct{}
+
+ max int
+ sliceSize int64
+
+ mtx sync.RWMutex
+}
+
+func newMaxSlicePool(sliceSize int64) *maxSlicePool {
+ p := &maxSlicePool{sliceSize: sliceSize}
+ p.allocator = p.newSlice
+
+ return p
+}
+
+var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
+
+func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) {
+	// Check if the context is canceled before attempting to get a slice.
+	// This ensures priority is given to the cancel case first.
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ p.mtx.RLock()
+
+ for {
+ select {
+ case bs, ok := <-p.slices:
+ p.mtx.RUnlock()
+ if !ok {
+ // attempt to get on a zero capacity pool
+ return nil, errZeroCapacity
+ }
+ return bs, nil
+ case <-ctx.Done():
+ p.mtx.RUnlock()
+ return nil, ctx.Err()
+ default:
+ // pass
+ }
+
+ select {
+ case _, ok := <-p.allocations:
+ p.mtx.RUnlock()
+ if !ok {
+ // attempt to get on a zero capacity pool
+ return nil, errZeroCapacity
+ }
+ return p.allocator(), nil
+ case <-ctx.Done():
+ p.mtx.RUnlock()
+ return nil, ctx.Err()
+ default:
+			// In the event that there are no slices or allocations available,
+			// this prevents some deadlock situations that can occur around sync.RWMutex.
+ // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock.
+ // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where
+ // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock,
+ // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity.
+
+ // Short-circuit if the pool capacity is zero.
+ if p.max == 0 {
+ p.mtx.RUnlock()
+ return nil, errZeroCapacity
+ }
+
+ // Since we will be releasing the read-lock we need to take the reference to the channel.
+ // Since channels are references we will still get notified if slices are added, or if
+ // the channel is closed due to a capacity modification. This specifically avoids a data race condition
+ // where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+ c := p.capacityChange
+
+ p.mtx.RUnlock()
+
+ select {
+			case <-c:
+ p.mtx.RLock()
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+ }
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+ p.mtx.RLock()
+ defer p.mtx.RUnlock()
+
+ if p.max == 0 {
+ return
+ }
+
+ select {
+ case p.slices <- bs:
+ p.notifyCapacity()
+ default:
+		// If the channel blocks when attempting to add the slice then we drop the slice.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+ // Allows us to reap allocations that are returned and are no longer needed.
+ }
+}
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+ if delta == 0 {
+ return
+ }
+
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.max += delta
+
+ if p.max == 0 {
+ p.empty()
+ return
+ }
+
+ if p.capacityChange != nil {
+ close(p.capacityChange)
+ }
+ p.capacityChange = make(chan struct{}, p.max)
+
+ origAllocations := p.allocations
+ p.allocations = make(chan struct{}, p.max)
+
+ newAllocs := len(origAllocations) + delta
+ for i := 0; i < newAllocs; i++ {
+ p.allocations <- struct{}{}
+ }
+
+ if origAllocations != nil {
+ close(origAllocations)
+ }
+
+ origSlices := p.slices
+ p.slices = make(chan *[]byte, p.max)
+ if origSlices == nil {
+ return
+ }
+
+ close(origSlices)
+ for bs := range origSlices {
+ select {
+ case p.slices <- bs:
+ default:
+ // If the new channel blocks while adding slices from the old channel
+ // then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+ }
+ }
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+ select {
+ case p.capacityChange <- struct{}{}:
+ default:
+ // This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+ // on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+ }
+}
+
+func (p *maxSlicePool) SliceSize() int64 {
+ return p.sliceSize
+}
+
+func (p *maxSlicePool) Close() {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+ p.empty()
+}
+
+func (p *maxSlicePool) empty() {
+ p.max = 0
+
+ if p.capacityChange != nil {
+ close(p.capacityChange)
+ p.capacityChange = nil
+ }
+
+ if p.allocations != nil {
+ close(p.allocations)
+ for range p.allocations {
+ // drain channel
+ }
+ p.allocations = nil
+ }
+
+ if p.slices != nil {
+ close(p.slices)
+ for range p.slices {
+ // drain channel
+ }
+ p.slices = nil
+ }
+}
+
+func (p *maxSlicePool) newSlice() *[]byte {
+ bs := make([]byte, p.sliceSize)
+ return &bs
+}
+
+type returnCapacityPoolCloser struct {
+ byteSlicePool
+ returnCapacity int
+}
+
+func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) {
+ if delta > 0 {
+ n.returnCapacity = -1 * delta
+ }
+ n.byteSlicePool.ModifyCapacity(delta)
+}
+
+func (n *returnCapacityPoolCloser) Close() {
+ if n.returnCapacity < 0 {
+ n.byteSlicePool.ModifyCapacity(n.returnCapacity)
+ }
+}
+
+type sliceAllocator func() *[]byte
+
+var newByteSlicePool = func(sliceSize int64) byteSlicePool {
+ return newMaxSlicePool(sliceSize)
+}
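
A minimal in-package sketch of the byteSlicePool contract (the example function below is hypothetical and exists only for illustration): capacity must be granted via ModifyCapacity before Get can succeed, and every Get should be paired with a Put.

package manager

import "context"

// examplePoolUsage is a hypothetical helper showing the pool's lifecycle.
func examplePoolUsage() error {
	pool := newMaxSlicePool(5 * 1024 * 1024) // 5 MiB slices
	pool.ModifyCapacity(4)                   // allow up to 4 outstanding slices
	defer pool.Close()

	buf, err := pool.Get(context.Background())
	if err != nil {
		return err // errZeroCapacity, or ctx.Err() on cancellation
	}
	defer pool.Put(buf)

	copy(*buf, "part payload") // use the slice as a part buffer
	return nil
}
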
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go
new file mode 100644
index 000000000..ce117c32a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/read_seeker_write_to.go
@@ -0,0 +1,65 @@
+package manager
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekerWriteTo defines an interface implementing io.WriterTo and io.ReadSeeker
+type ReadSeekerWriteTo interface {
+ io.ReadSeeker
+ io.WriterTo
+}
+
+// BufferedReadSeekerWriteTo wraps a BufferedReadSeeker with an io.WriterTo
+// implementation.
+type BufferedReadSeekerWriteTo struct {
+ *BufferedReadSeeker
+}
+
+// WriteTo writes to the given io.Writer from BufferedReadSeeker until there's no more data to write or
+// an error occurs. Returns the number of bytes written and any error encountered during the write.
+func (b *BufferedReadSeekerWriteTo) WriteTo(writer io.Writer) (int64, error) {
+ return io.Copy(writer, b.BufferedReadSeeker)
+}
+
+// ReadSeekerWriteToProvider provides an implementation of io.WriterTo for an io.ReadSeeker
+type ReadSeekerWriteToProvider interface {
+ GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func())
+}
+
+// BufferedReadSeekerWriteToPool uses a sync.Pool to create and reuse
+// []byte slices for buffering parts in memory
+type BufferedReadSeekerWriteToPool struct {
+ pool sync.Pool
+}
+
+// NewBufferedReadSeekerWriteToPool will return a new BufferedReadSeekerWriteToPool that will create
+// a pool of reusable buffers. If size is less than 64 KiB then the buffer
+// will default to 64 KiB. Reason: io.Copy from writers or readers that don't
+// support io.WriterTo or io.ReaderFrom respectively will default to copying 32 KiB.
+func NewBufferedReadSeekerWriteToPool(size int) *BufferedReadSeekerWriteToPool {
+ if size < 65536 {
+ size = 65536
+ }
+
+ return &BufferedReadSeekerWriteToPool{
+ pool: sync.Pool{New: func() interface{} {
+ return make([]byte, size)
+ }},
+ }
+}
+
+// GetWriteTo will wrap the provided io.ReadSeeker with a BufferedReadSeekerWriteTo.
+// The provided cleanup must be called after operations have been completed on the
+// returned ReadSeekerWriteTo in order to signal the return of resources to the pool.
+func (p *BufferedReadSeekerWriteToPool) GetWriteTo(seeker io.ReadSeeker) (r ReadSeekerWriteTo, cleanup func()) {
+ buffer := p.pool.Get().([]byte)
+
+ r = &BufferedReadSeekerWriteTo{BufferedReadSeeker: NewBufferedReadSeeker(seeker, buffer)}
+ cleanup = func() {
+ p.pool.Put(buffer)
+ }
+
+ return r, cleanup
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go
new file mode 100644
index 000000000..399d81aae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/types.go
@@ -0,0 +1,187 @@
+package manager
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReaderSeekerCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation from being retried in the
+// case of network errors, and cause operation requests to fail if the
+// operation requires payload signing.
+//
+// Note: If using with S3 PutObject to stream an object upload, the SDK's S3
+// Upload Manager (manager.Uploader) provides support for streaming
+// with the ability to retry network errors.
+func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser {
+ return &ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// seekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func seekerLen(s io.Seeker) (int64, error) {
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) {
+ case *ReaderSeekerCloser:
+ return v.GetLen()
+ }
+
+ return computeSeekerLength(s)
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r *ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+ return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+ return computeSeekerLength(s)
+ }
+
+ return -1, nil
+}
+
+func computeSeekerLength(s io.Seeker) (int64, error) {
+ curOffset, err := s.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := s.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = s.Seek(curOffset, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r *ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+ Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+ return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read
+// and any error that occurred will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r *ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns whether the underlying reader is also a seeker.
+func (r *ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r *ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
+// Can be used with the manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position provided.
+// The number of bytes written, or an error, will be returned. Can overwrite
+// previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
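
A common pattern, sketched here with assumed names (imports elided; `s3Client`, the bucket, and the key are illustrative), is to pair WriteAtBuffer with the manager's Downloader to land an object in memory:

buf := manager.NewWriteAtBuffer(nil) // grows as parts arrive
downloader := manager.NewDownloader(s3Client)

n, err := downloader.Download(context.TODO(), buf, &s3.GetObjectInput{
	Bucket: aws.String("example-bucket"),
	Key:    aws.String("example-key"),
})
if err != nil {
	log.Fatal(err)
}
data := buf.Bytes()[:n] // the downloaded object bytes
_ = data
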
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
new file mode 100644
index 000000000..93133aee5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
@@ -0,0 +1,912 @@
+package manager
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithymiddleware "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
+// on Amazon S3.
+const MaxUploadParts int32 = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multipart upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+// u := manager.NewUploader(client)
+// output, err := u.upload(context.Background(), input)
+// if err != nil {
+// var multierr manager.MultiUploadFailure
+// if errors.As(err, &multierr) {
+// fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error())
+// } else {
+// fmt.Printf("upload failure, %s\n", err.Error())
+// }
+// }
+type MultiUploadFailure interface {
+ error
+
+ // UploadID returns the upload id for the S3 multipart upload that failed.
+ UploadID() string
+}
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of the underlying error and the ID of the failed upload.
+//
+// Should be used for an error that occurred while failing an S3 multipart
+// upload, when an upload ID is available. If an upload ID is not available,
+// a more relevant error should be returned instead.
+type multiUploadError struct {
+ err error
+
+ // ID for multipart upload which failed.
+ uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (m *multiUploadError) Error() string {
+ var extra string
+ if m.err != nil {
+ extra = fmt.Sprintf(", cause: %s", m.err.Error())
+ }
+ return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra)
+}
+
+// Unwrap returns the underlying error that caused the upload failure.
+func (m *multiUploadError) Unwrap() error {
+ return m.err
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m *multiUploadError) UploadID() string {
+ return m.uploadID
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+ // The URL where the object was uploaded to.
+ Location string
+
+ // The ID for a multipart upload to S3. In the case of an error the error
+ // can be cast to the MultiUploadFailure interface to extract the upload ID.
+ // Will be empty string if multipart upload was not used, and the object
+ // was uploaded as a single PutObject call.
+ UploadID string
+
+ // The list of parts that were uploaded and their checksums. Will be empty
+ // if multipart upload was not used, and the object was uploaded as a
+ // single PutObject call.
+ CompletedParts []types.CompletedPart
+
+ // Indicates whether the uploaded object uses an S3 Bucket Key for server-side
+ // encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled bool
+
+ // The base64-encoded, 32-bit CRC32 checksum of the object.
+ ChecksumCRC32 *string
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object.
+ ChecksumCRC32C *string
+
+ // The base64-encoded, 64-bit CRC64NVME checksum of the object.
+ ChecksumCRC64NVME *string
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object.
+ ChecksumSHA1 *string
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object.
+ ChecksumSHA256 *string
+
+ // Entity tag for the uploaded object.
+ ETag *string
+
+ // If the object expiration is configured, this will contain the expiration date
+ // (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string
+
+ // The object key of the newly created object.
+ Key *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged types.RequestCharged
+
+ // If present, specifies the ID of the Amazon Web Services Key Management Service
+ // (Amazon Web Services KMS) symmetric customer managed customer master key (CMK)
+ // that was used for the object.
+ SSEKMSKeyId *string
+
+ // If you specified server-side encryption either with an Amazon S3-managed
+ // encryption key or an Amazon Web Services KMS customer master key (CMK) in your
+ // initiate multipart upload request, the response includes this header. It
+ // confirms the encryption algorithm that Amazon S3 used to encrypt the object.
+ ServerSideEncryption types.ServerSideEncryption
+
+ // The version of the object that was uploaded. Will only be populated if
+ // the S3 Bucket is versioned. If the bucket is not versioned this field
+ // will not be set.
+ VersionID *string
+}
+
+// WithUploaderRequestOptions appends to the Uploader's API client options.
+func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) {
+ return func(u *Uploader) {
+ u.ClientOptions = append(u.ClientOptions, opts...)
+ }
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Uploader's properties is not safe to be done concurrently.
+//
+// # Pre-computed Checksums
+//
+// Care must be taken when using pre-computed checksums with the transfer
+// upload manager. The format and value of the checksum differ based on
+// whether the upload will be performed as a single-part or multipart upload.
+//
+// Uploads that are smaller than the Uploader's PartSize will be uploaded using
+// the PutObject API operation. Pre-computed checksums of the uploaded object's
+// content are valid for these single part uploads. If the checksum provided
+// does not match the uploaded content the upload will fail.
+//
+// Uploads that are larger than the Uploader's PartSize will be uploaded using
+// multipart upload. The pre-computed checksums for these uploads are a
+// checksum of the checksums of each part, not a checksum of the full uploaded
+// bytes, with the format "<checksum>-<partCount>" (e.g.
+// "DUoRhQ==-3"). If a pre-computed checksum is provided that does not match
+// this format, or does not match the content uploaded, the upload will fail.
+//
+// ContentMD5 is explicitly ignored for multipart uploads,
+// and its value is suppressed.
+//
+// # Automatically Computed Checksums
+//
+// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput
+// is set to a valid value, the SDK will automatically compute the checksum of
+// the individual uploaded parts. The UploadOutput result from Upload will
+// include the checksum of part checksums provided by S3
+// CompleteMultipartUpload API call.
+type Uploader struct {
+ // The buffer size (in bytes) to use when buffering data into chunks and
+ // sending them as parts to S3. The minimum allowed part size is 5MB, and
+ // if this value is set to zero, the DefaultUploadPartSize value will be used.
+ PartSize int64
+
+ // The number of goroutines to spin up in parallel per call to Upload when
+ // sending parts. If this is set to zero, the DefaultUploadConcurrency value
+ // will be used.
+ //
+ // The concurrency pool is not shared between calls to Upload.
+ Concurrency int
+
+ // Setting this value to true will cause the SDK to avoid calling
+ // AbortMultipartUpload on a failure, leaving all successfully uploaded
+ // parts on S3 for manual recovery.
+ //
+ // Note that storing parts of an incomplete multipart upload counts towards
+ // space usage on S3 and will add additional costs if not cleaned up.
+ LeavePartsOnError bool
+
+ // MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// Will be used to calculate the part size of the object to be uploaded.
+	// E.g: a 5GB file, with MaxUploadParts set to 100, will upload the file
+	// as 100 parts of 50MB each, with a limit of MaxUploadParts (10,000 parts).
+ //
+ // MaxUploadParts must not be used to limit the total number of bytes uploaded.
+	// Use a type like io.LimitReader (https://golang.org/pkg/io/#LimitedReader)
+	// instead. An io.LimitedReader is helpful when uploading an unbounded reader
+	// to S3 whose maximum size you know. Otherwise the reader's returned io.EOF
+	// error must be used to signal end of stream.
+ //
+ // Defaults to package const's MaxUploadParts value.
+ MaxUploadParts int32
+
+ // The client to use when uploading to S3.
+ S3 UploadAPIClient
+
+ // List of request options that will be passed down to individual API
+ // operation requests made by the uploader.
+ ClientOptions []func(*s3.Options)
+
+ // Defines the buffer strategy used when uploading a part
+ BufferProvider ReadSeekerWriteToProvider
+
+ // partPool allows for the re-usage of streaming payload part buffers between upload calls
+ partPool byteSlicePool
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// an S3 service client to make the underlying API calls.
+//
+// Example:
+//
+// // Load AWS Config
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create an S3 Client with the config
+// client := s3.NewFromConfig(cfg)
+//
+// // Create an uploader passing it the client
+// uploader := manager.NewUploader(client)
+//
+// // Create an uploader with the client and custom options
+// uploader := manager.NewUploader(client, func(u *manager.Uploader) {
+// u.PartSize = 64 * 1024 * 1024 // 64MB per part
+// })
+func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader {
+ u := &Uploader{
+ S3: client,
+ PartSize: DefaultUploadPartSize,
+ Concurrency: DefaultUploadConcurrency,
+ LeavePartsOnError: false,
+ MaxUploadParts: MaxUploadParts,
+ BufferProvider: defaultUploadBufferProvider(),
+ }
+
+ for _, option := range options {
+ option(u)
+ }
+
+ u.partPool = newByteSlicePool(u.PartSize)
+
+ return u
+}
+
+// Upload uploads an object to S3, intelligently buffering large
+// files into smaller chunks and sending them in parallel across multiple
+// goroutines. You can configure the buffer size and concurrency through the
+// Uploader parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options operate on a copy of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// Use the WithUploaderRequestOptions helper function to pass in request
+// options that will be applied to all API operations made with this uploader.
+//
+// It is safe to call this method concurrently across goroutines.
+func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) (
+ *UploadOutput, error,
+) {
+ i := uploader{in: input, cfg: u, ctx: ctx}
+
+ // Copy ClientOptions
+ clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1)
+ clientOptions = append(clientOptions, func(o *s3.Options) {
+ o.APIOptions = append(o.APIOptions,
+ middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
+ addFeatureUserAgent, // yes, there are two of these
+ func(s *smithymiddleware.Stack) error {
+ return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After)
+ },
+ )
+ })
+ clientOptions = append(clientOptions, i.cfg.ClientOptions...)
+ i.cfg.ClientOptions = clientOptions
+
+ for _, opt := range opts {
+ opt(&i.cfg)
+ }
+
+ return i.upload()
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+ ctx context.Context
+ cfg Uploader
+
+ in *s3.PutObjectInput
+
+ readerPos int64 // current reader position
+ totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
+func (u *uploader) upload() (*UploadOutput, error) {
+ if err := u.init(); err != nil {
+ return nil, fmt.Errorf("unable to initialize upload: %w", err)
+ }
+ defer u.cfg.partPool.Close()
+
+ if u.cfg.PartSize < MinUploadPartSize {
+ return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize)
+ }
+
+ // Do one read to determine if we have more than one part
+ reader, _, cleanup, err := u.nextReader()
+ if err == io.EOF { // single part
+ return u.singlePart(reader, cleanup)
+ } else if err != nil {
+ cleanup()
+ return nil, fmt.Errorf("read upload data failed: %w", err)
+ }
+
+ mu := multiuploader{uploader: u}
+ return mu.upload(reader, cleanup)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() error {
+ if err := validateSupportedARNType(aws.ToString(u.in.Bucket)); err != nil {
+ return err
+ }
+
+ if u.cfg.Concurrency == 0 {
+ u.cfg.Concurrency = DefaultUploadConcurrency
+ }
+ if u.cfg.PartSize == 0 {
+ u.cfg.PartSize = DefaultUploadPartSize
+ }
+ if u.cfg.MaxUploadParts == 0 {
+ u.cfg.MaxUploadParts = MaxUploadParts
+ }
+
+ // Try to get the total size for some optimizations
+ if err := u.initSize(); err != nil {
+ return err
+ }
+
+	// If PartSize was changed or partPool was never set up then we need to allocate a new pool
+ // so that we return []byte slices of the correct size
+ poolCap := u.cfg.Concurrency + 1
+ if u.cfg.partPool == nil || u.cfg.partPool.SliceSize() != u.cfg.PartSize {
+ u.cfg.partPool = newByteSlicePool(u.cfg.PartSize)
+ u.cfg.partPool.ModifyCapacity(poolCap)
+ } else {
+ u.cfg.partPool = &returnCapacityPoolCloser{byteSlicePool: u.cfg.partPool}
+ u.cfg.partPool.ModifyCapacity(poolCap)
+ }
+
+ return nil
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() error {
+ u.totalSize = -1
+
+ switch r := u.in.Body.(type) {
+ case io.Seeker:
+ n, err := seekerLen(r)
+ if err != nil {
+ return err
+ }
+ u.totalSize = n
+
+ // Try to adjust partSize if it is too small and account for
+ // integer division truncation.
+ if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
+ // Add one to the part size to account for remainders
+ // during the size calculation. e.g odd number of bytes.
+ u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
+ }
+ }
+
+ return nil
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, int, func(), error) {
+ switch r := u.in.Body.(type) {
+ case readerAtSeeker:
+ var err error
+
+ n := u.cfg.PartSize
+ if u.totalSize >= 0 {
+ bytesLeft := u.totalSize - u.readerPos
+
+ if bytesLeft <= u.cfg.PartSize {
+ err = io.EOF
+ n = bytesLeft
+ }
+ }
+
+ var (
+ reader io.ReadSeeker
+ cleanup func()
+ )
+
+ reader = io.NewSectionReader(r, u.readerPos, n)
+ if u.cfg.BufferProvider != nil {
+ reader, cleanup = u.cfg.BufferProvider.GetWriteTo(reader)
+ } else {
+ cleanup = func() {}
+ }
+
+ u.readerPos += n
+
+ return reader, int(n), cleanup, err
+
+ default:
+ part, err := u.cfg.partPool.Get(u.ctx)
+ if err != nil {
+ return nil, 0, func() {}, err
+ }
+
+ n, err := readFillBuf(r, *part)
+ u.readerPos += int64(n)
+
+ cleanup := func() {
+ u.cfg.partPool.Put(part)
+ }
+
+ return bytes.NewReader((*part)[0:n]), n, cleanup, err
+ }
+}
+
+func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
+ for offset < len(b) && err == nil {
+ var n int
+ n, err = r.Read(b[offset:])
+ offset += n
+ }
+
+ return offset, err
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+ defer cleanup()
+
+ var params s3.PutObjectInput
+ awsutil.Copy(¶ms, u.in)
+ params.Body = r
+
+	// Wrap the HTTP client to record the request URL, since the URL
+	// generated for the request is returned as the upload Location.
+
+ var locationRecorder recordLocationClient
+ out, err := u.cfg.S3.PutObject(u.ctx, ¶ms,
+ append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &UploadOutput{
+ Location: locationRecorder.location,
+
+ BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled),
+ ChecksumCRC32: out.ChecksumCRC32,
+ ChecksumCRC32C: out.ChecksumCRC32C,
+ ChecksumCRC64NVME: out.ChecksumCRC64NVME,
+ ChecksumSHA1: out.ChecksumSHA1,
+ ChecksumSHA256: out.ChecksumSHA256,
+ ETag: out.ETag,
+ Expiration: out.Expiration,
+ Key: params.Key,
+ RequestCharged: out.RequestCharged,
+ SSEKMSKeyId: out.SSEKMSKeyId,
+ ServerSideEncryption: out.ServerSideEncryption,
+ VersionID: out.VersionId,
+ }, nil
+}
+
+type httpClient interface {
+ Do(r *http.Request) (*http.Response, error)
+}
+
+type recordLocationClient struct {
+ httpClient
+ location string
+}
+
+func (c *recordLocationClient) WrapClient() func(o *s3.Options) {
+ return func(o *s3.Options) {
+ c.httpClient = o.HTTPClient
+ o.HTTPClient = c
+ }
+}
+
+func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) {
+ resp, err = c.httpClient.Do(r)
+ if err != nil {
+ return resp, err
+ }
+
+ if resp.Request != nil && resp.Request.URL != nil {
+ url := *resp.Request.URL
+ url.RawQuery = ""
+ c.location = url.String()
+ }
+
+ return resp, err
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+ *uploader
+ wg sync.WaitGroup
+ m sync.Mutex
+ err error
+ uploadID string
+ parts completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
+type chunk struct {
+ buf io.ReadSeeker
+ num int32
+ cleanup func()
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []types.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool {
+ return aws.ToInt32(a[i].PartNumber) < aws.ToInt32(a[j].PartNumber)
+}
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
+ u.initChecksumAlgorithm()
+
+ var params s3.CreateMultipartUploadInput
+ awsutil.Copy(¶ms, u.in)
+
+ // Create the multipart
+ var locationRecorder recordLocationClient
+ resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, ¶ms,
+ append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
+ if err != nil {
+ cleanup()
+ return nil, err
+ }
+ u.uploadID = *resp.UploadId
+
+ // Create the workers
+ ch := make(chan chunk, u.cfg.Concurrency)
+ for i := 0; i < u.cfg.Concurrency; i++ {
+ u.wg.Add(1)
+ go u.readChunk(ch)
+ }
+
+ // Send part 1 to the workers
+ var num int32 = 1
+ ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
+
+ // Read and queue the rest of the parts
+ for u.geterr() == nil && err == nil {
+ var (
+ reader io.ReadSeeker
+ nextChunkLen int
+ ok bool
+ )
+
+ reader, nextChunkLen, cleanup, err = u.nextReader()
+ ok, err = u.shouldContinue(num, nextChunkLen, err)
+ if !ok {
+ cleanup()
+ if err != nil {
+ u.seterr(err)
+ }
+ break
+ }
+
+ num++
+
+ ch <- chunk{buf: reader, num: num, cleanup: cleanup}
+ }
+
+ // Close the channel, wait for workers, and complete upload
+ close(ch)
+ u.wg.Wait()
+ completeOut := u.complete()
+
+ if err := u.geterr(); err != nil {
+ return nil, &multiUploadError{
+ err: err,
+ uploadID: u.uploadID,
+ }
+ }
+
+ return &UploadOutput{
+ Location: locationRecorder.location,
+ UploadID: u.uploadID,
+ CompletedParts: u.parts,
+
+ BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled),
+ ChecksumCRC32: completeOut.ChecksumCRC32,
+ ChecksumCRC32C: completeOut.ChecksumCRC32C,
+ ChecksumCRC64NVME: completeOut.ChecksumCRC64NVME,
+ ChecksumSHA1: completeOut.ChecksumSHA1,
+ ChecksumSHA256: completeOut.ChecksumSHA256,
+ ETag: completeOut.ETag,
+ Expiration: completeOut.Expiration,
+ Key: completeOut.Key,
+ RequestCharged: completeOut.RequestCharged,
+ SSEKMSKeyId: completeOut.SSEKMSKeyId,
+ ServerSideEncryption: completeOut.ServerSideEncryption,
+ VersionID: completeOut.VersionId,
+ }, nil
+}
+
+func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) {
+ if err != nil && err != io.EOF {
+ return false, fmt.Errorf("read multipart upload data failed, %w", err)
+ }
+
+ if nextChunkLen == 0 {
+		// No need to upload an empty part. If the file was empty to
+		// start with, an empty single part would have been created and
+		// a multipart upload never started.
+ return false, nil
+ }
+
+ part++
+ // This upload exceeded maximum number of supported parts, error now.
+ if part > u.cfg.MaxUploadParts || part > MaxUploadParts {
+ var msg string
+ if part > u.cfg.MaxUploadParts {
+ msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ u.cfg.MaxUploadParts)
+ } else {
+ msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+ MaxUploadParts)
+ }
+ return false, errors.New(msg)
+ }
+
+ return true, err
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+ defer u.wg.Done()
+ for {
+ data, ok := <-ch
+
+ if !ok {
+ break
+ }
+
+ if u.geterr() == nil {
+ if err := u.send(data); err != nil {
+ u.seterr(err)
+ }
+ }
+
+ data.cleanup()
+ }
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
+func (u *multiuploader) send(c chunk) error {
+ params := &s3.UploadPartInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ Body: c.buf,
+ SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
+ SSECustomerKey: u.in.SSECustomerKey,
+ SSECustomerKeyMD5: u.in.SSECustomerKeyMD5,
+ ExpectedBucketOwner: u.in.ExpectedBucketOwner,
+ RequestPayer: u.in.RequestPayer,
+
+ ChecksumAlgorithm: u.in.ChecksumAlgorithm,
+ // Invalid to set any of the individual ChecksumXXX members from
+ // PutObject as they are never valid for individual parts of a
+ // multipart upload.
+
+ PartNumber: aws.Int32(c.num),
+ UploadId: &u.uploadID,
+ }
+ // TODO should do copy then clear?
+
+ resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...)
+ if err != nil {
+ return err
+ }
+
+ var completed types.CompletedPart
+ awsutil.Copy(&completed, resp)
+ completed.PartNumber = aws.Int32(c.num)
+
+ u.m.Lock()
+ u.parts = append(u.parts, completed)
+ u.m.Unlock()
+
+ return nil
+}
+
+func (u *multiuploader) initChecksumAlgorithm() {
+ if u.in.ChecksumAlgorithm != "" {
+ return
+ }
+
+ switch {
+ case u.in.ChecksumCRC32 != nil:
+ u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32
+ case u.in.ChecksumCRC32C != nil:
+ u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32c
+ case u.in.ChecksumCRC64NVME != nil:
+ u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc64nvme
+ case u.in.ChecksumSHA1 != nil:
+ u.in.ChecksumAlgorithm = types.ChecksumAlgorithmSha1
+ case u.in.ChecksumSHA256 != nil:
+ u.in.ChecksumAlgorithm = types.ChecksumAlgorithmSha256
+ default:
+ u.in.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32
+ }
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+ u.m.Lock()
+ defer u.m.Unlock()
+
+ u.err = e
+}
+
+// fail will abort the multipart upload unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+ if u.cfg.LeavePartsOnError {
+ return
+ }
+
+ params := &s3.AbortMultipartUploadInput{
+ Bucket: u.in.Bucket,
+ Key: u.in.Key,
+ UploadId: &u.uploadID,
+ }
+ _, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...)
+ if err != nil {
+ // TODO: Add logging
+ //logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
+ _ = err
+ }
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+ if u.geterr() != nil {
+ u.fail()
+ return nil
+ }
+
+ // Parts must be sorted in PartNumber order.
+ sort.Sort(u.parts)
+
+ var params s3.CompleteMultipartUploadInput
+ awsutil.Copy(¶ms, u.in)
+ params.UploadId = &u.uploadID
+ params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts}
+
+ resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, ¶ms, u.cfg.ClientOptions...)
+ if err != nil {
+ u.seterr(err)
+ u.fail()
+ }
+
+ return resp
+}
+
+type readerAtSeeker interface {
+ io.ReaderAt
+ io.ReadSeeker
+}
+
+// setS3ExpressDefaultChecksum defaults to CRC32 for S3Express buckets,
+// which is required when uploading to them through the transfer manager.
+type setS3ExpressDefaultChecksum struct{}
+
+func (*setS3ExpressDefaultChecksum) ID() string {
+ return "setS3ExpressDefaultChecksum"
+}
+
+func (*setS3ExpressDefaultChecksum) HandleFinalize(
+ ctx context.Context, in smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler,
+) (
+ out smithymiddleware.FinalizeOutput, metadata smithymiddleware.Metadata, err error,
+) {
+ const checksumHeader = "x-amz-checksum-algorithm"
+
+ if internalcontext.GetS3Backend(ctx) != internalcontext.S3BackendS3Express {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ // If this is CreateMultipartUpload we need to ensure the checksum
+ // algorithm header is present. Otherwise everything is driven off the
+ // context setting and we can let it flow from there.
+ if middleware.GetOperationName(ctx) == "CreateMultipartUpload" {
+ r, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if internalcontext.GetChecksumInputAlgorithm(ctx) == "" {
+ r.Header.Set(checksumHeader, "CRC32")
+ }
+ return next.HandleFinalize(ctx, in)
+ } else if internalcontext.GetChecksumInputAlgorithm(ctx) == "" {
+ ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(types.ChecksumAlgorithmCrc32))
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addFeatureUserAgent(stack *smithymiddleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(middleware.UserAgentFeatureS3Transfer)
+ return nil
+}
+
+func getOrAddRequestUserAgent(stack *smithymiddleware.Stack) (*middleware.RequestUserAgent, error) {
+ id := (*middleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = middleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, smithymiddleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*middleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go
new file mode 100644
index 000000000..fb10ec309
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/writer_read_from.go
@@ -0,0 +1,83 @@
+package manager
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom
+type WriterReadFrom interface {
+ io.Writer
+ io.ReaderFrom
+}
+
+// WriterReadFromProvider provides an implementation of io.ReaderFrom for the given io.Writer
+type WriterReadFromProvider interface {
+ GetReadFrom(writer io.Writer) (w WriterReadFrom, cleanup func())
+}
+
+type bufferedWriter interface {
+ WriterReadFrom
+ Flush() error
+ Reset(io.Writer)
+}
+
+type bufferedReadFrom struct {
+ bufferedWriter
+}
+
+func (b *bufferedReadFrom) ReadFrom(r io.Reader) (int64, error) {
+ n, err := b.bufferedWriter.ReadFrom(r)
+ if flushErr := b.Flush(); flushErr != nil && err == nil {
+ err = flushErr
+ }
+ return n, err
+}
+
+// PooledBufferedReadFromProvider is a WriterReadFromProvider that uses a sync.Pool
+// to manage allocation and reuse of *bufio.Writer structures.
+type PooledBufferedReadFromProvider struct {
+ pool sync.Pool
+}
+
+// NewPooledBufferedWriterReadFromProvider returns a new PooledBufferedReadFromProvider
+// Size is used to control the size of the underlying *bufio.Writer created for
+// calls to GetReadFrom.
+func NewPooledBufferedWriterReadFromProvider(size int) *PooledBufferedReadFromProvider {
+ if size < int(32*sdkio.KibiByte) {
+ size = int(64 * sdkio.KibiByte)
+ }
+
+ return &PooledBufferedReadFromProvider{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return &bufferedReadFrom{bufferedWriter: bufio.NewWriterSize(nil, size)}
+ },
+ },
+ }
+}
+
+// GetReadFrom takes an io.Writer and wraps it with a type which satisfies the WriterReadFrom
+// interface. Additionally a cleanup function is provided which must be called after usage of the WriterReadFrom
+// has been completed in order to allow the reuse of the *bufio.Writer.
+func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r WriterReadFrom, cleanup func()) {
+ buffer := p.pool.Get().(*bufferedReadFrom)
+ buffer.Reset(writer)
+ r = buffer
+ cleanup = func() {
+ buffer.Reset(nil) // Reset to nil writer to release reference
+ p.pool.Put(buffer)
+ }
+ return r, cleanup
+}
+
+type suppressWriterAt struct {
+ suppressed io.Reader
+}
+
+func (s *suppressWriterAt) Read(p []byte) (n int, err error) {
+ return s.suppressed.Read(p)
+}
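
Mirroring the upload-side pool, a sketch of plugging this provider into a Downloader (`s3Client` is an assumed, already-configured S3 client):

downloader := manager.NewDownloader(s3Client, func(d *manager.Downloader) {
	// Flush each downloaded chunk through a pooled 1 MiB bufio.Writer.
	d.BufferProvider = manager.NewPooledBufferedWriterReadFromProvider(1 << 20)
})
_ = downloader
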
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go
new file mode 100644
index 000000000..0b81db548
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go
@@ -0,0 +1,45 @@
+package auth
+
+import (
+ "github.com/aws/smithy-go/auth"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme
+// for pre-existing implementations where the signer was added to client
+// config. SDK clients will key off of this type and ensure per-operation
+// updates to those signers persist on the scheme itself.
+type HTTPAuthScheme struct {
+ schemeID string
+ signer smithyhttp.Signer
+}
+
+var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil)
+
+// NewHTTPAuthScheme returns an auth scheme instance with the given config.
+func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme {
+ return &HTTPAuthScheme{
+ schemeID: schemeID,
+ signer: signer,
+ }
+}
+
+// SchemeID identifies the auth scheme.
+func (s *HTTPAuthScheme) SchemeID() string {
+ return s.schemeID
+}
+
+// IdentityResolver gets the identity resolver for the auth scheme.
+func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver {
+ return o.GetIdentityResolver(s.schemeID)
+}
+
+// Signer gets the signer for the auth scheme.
+func (s *HTTPAuthScheme) Signer() smithyhttp.Signer {
+ return s.signer
+}
+
+// WithSigner returns a new instance of the auth scheme with the updated signer.
+func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme {
+ return NewHTTPAuthScheme(s.schemeID, signer)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
new file mode 100644
index 000000000..bbc2ec06e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
@@ -0,0 +1,191 @@
+package auth
+
+import (
+ "context"
+ "fmt"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// SigV4 is a constant representing
+// Authentication Scheme Signature Version 4
+const SigV4 = "sigv4"
+
+// SigV4A is a constant representing
+// Authentication Scheme Signature Version 4A
+const SigV4A = "sigv4a"
+
+// SigV4S3Express identifies the S3 S3Express auth scheme.
+const SigV4S3Express = "sigv4-s3express"
+
+// None is a constant representing the
+// None Authentication Scheme
+const None = "none"
+
+// SupportedSchemes is a data structure
+// that indicates the list of supported AWS
+// authentication schemes
+var SupportedSchemes = map[string]bool{
+ SigV4: true,
+ SigV4A: true,
+ SigV4S3Express: true,
+ None: true,
+}
+
+// AuthenticationScheme is a representation of
+// AWS authentication schemes
+type AuthenticationScheme interface {
+ isAuthenticationScheme()
+}
+
+// AuthenticationSchemeV4 is an AWS SigV4 representation
+type AuthenticationSchemeV4 struct {
+ Name string
+ SigningName *string
+ SigningRegion *string
+ DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4) isAuthenticationScheme() {}
+
+// AuthenticationSchemeV4A is an AWS SigV4A representation
+type AuthenticationSchemeV4A struct {
+ Name string
+ SigningName *string
+ SigningRegionSet []string
+ DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {}
+
+// AuthenticationSchemeNone is a representation for the none auth scheme
+type AuthenticationSchemeNone struct{}
+
+func (a *AuthenticationSchemeNone) isAuthenticationScheme() {}
+
+// NoAuthenticationSchemesFoundError is used in signaling
+// that no authentication schemes have been specified.
+type NoAuthenticationSchemesFoundError struct{}
+
+func (e *NoAuthenticationSchemesFoundError) Error() string {
+ return fmt.Sprint("No authentication schemes specified.")
+}
+
+// UnSupportedAuthenticationSchemeSpecifiedError is used in
+// signaling that only unsupported authentication schemes
+// were specified.
+type UnSupportedAuthenticationSchemeSpecifiedError struct {
+ UnsupportedSchemes []string
+}
+
+func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string {
+ return fmt.Sprint("Unsupported authentication scheme specified.")
+}
+
+// GetAuthenticationSchemes extracts the relevant authentication scheme data
+// into a custom strongly typed Go data structure.
+func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) {
+ var result []AuthenticationScheme
+ if !p.Has("authSchemes") {
+ return nil, &NoAuthenticationSchemesFoundError{}
+ }
+
+ authSchemes, _ := p.Get("authSchemes").([]interface{})
+
+ var unsupportedSchemes []string
+ for _, scheme := range authSchemes {
+ authScheme, _ := scheme.(map[string]interface{})
+
+ version := authScheme["name"].(string)
+ switch version {
+ case SigV4, SigV4S3Express:
+ v4Scheme := AuthenticationSchemeV4{
+ Name: version,
+ SigningName: getSigningName(authScheme),
+ SigningRegion: getSigningRegion(authScheme),
+ DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),
+ }
+ result = append(result, AuthenticationScheme(&v4Scheme))
+ case SigV4A:
+ v4aScheme := AuthenticationSchemeV4A{
+ Name: SigV4A,
+ SigningName: getSigningName(authScheme),
+ SigningRegionSet: getSigningRegionSet(authScheme),
+ DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),
+ }
+ result = append(result, AuthenticationScheme(&v4aScheme))
+ case None:
+ noneScheme := AuthenticationSchemeNone{}
+ result = append(result, AuthenticationScheme(&noneScheme))
+ default:
+ unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string))
+ continue
+ }
+ }
+
+ if len(result) == 0 {
+ return nil, &UnSupportedAuthenticationSchemeSpecifiedError{
+ UnsupportedSchemes: unsupportedSchemes,
+ }
+ }
+
+ return result, nil
+}
+
+type disableDoubleEncoding struct{}
+
+// SetDisableDoubleEncoding sets or modifies the disable double encoding option
+// on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value)
+}
+
+// GetDisableDoubleEncoding retrieves the disable double encoding option
+// from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) {
+ value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool)
+ return value, ok
+}
+
+func getSigningName(authScheme map[string]interface{}) *string {
+ signingName, ok := authScheme["signingName"].(string)
+ if !ok || signingName == "" {
+ return nil
+ }
+ return &signingName
+}
+
+func getSigningRegionSet(authScheme map[string]interface{}) []string {
+ untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{})
+ if !ok {
+ return nil
+ }
+ signingRegionSet := []string{}
+ for _, item := range untypedSigningRegionSet {
+ signingRegionSet = append(signingRegionSet, item.(string))
+ }
+ return signingRegionSet
+}
+
+func getSigningRegion(authScheme map[string]interface{}) *string {
+ signingRegion, ok := authScheme["signingRegion"].(string)
+ if !ok || signingRegion == "" {
+ return nil
+ }
+ return &signingRegion
+}
+
+func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool {
+ disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool)
+ if !ok {
+ return nil
+ }
+ return &disableDoubleEncoding
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go
new file mode 100644
index 000000000..f059b5d39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go
@@ -0,0 +1,43 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/auth/bearer"
+)
+
+// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity.
+type BearerTokenAdapter struct {
+ Token bearer.Token
+}
+
+var _ auth.Identity = (*BearerTokenAdapter)(nil)
+
+// Expiration returns the time of expiration for the token.
+func (v *BearerTokenAdapter) Expiration() time.Time {
+ return v.Token.Expires
+}
+
+// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy
+// auth.IdentityResolver.
+type BearerTokenProviderAdapter struct {
+ Provider bearer.TokenProvider
+}
+
+var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil)
+
+// GetIdentity retrieves a bearer token using the underlying provider.
+func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+ auth.Identity, error,
+) {
+ token, err := v.Provider.RetrieveBearerToken(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get token: %w", err)
+ }
+
+ return &BearerTokenAdapter{Token: token}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go
new file mode 100644
index 000000000..a88281527
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go
@@ -0,0 +1,35 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/auth/bearer"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http
+// auth.Signer.
+type BearerTokenSignerAdapter struct {
+ Signer bearer.Signer
+}
+
+var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil)
+
+// SignRequest signs the request with the provided bearer token.
+func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error {
+ ca, ok := identity.(*BearerTokenAdapter)
+ if !ok {
+ return fmt.Errorf("unexpected identity type: %T", identity)
+ }
+
+ signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r)
+ if err != nil {
+ return fmt.Errorf("sign request: %w", err)
+ }
+
+ *r = *signed.(*smithyhttp.Request)
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go
new file mode 100644
index 000000000..f926c4aaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go
@@ -0,0 +1,46 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// CredentialsAdapter adapts aws.Credentials to auth.Identity.
+type CredentialsAdapter struct {
+ Credentials aws.Credentials
+}
+
+var _ auth.Identity = (*CredentialsAdapter)(nil)
+
+// Expiration returns the time of expiration for the credentials.
+func (v *CredentialsAdapter) Expiration() time.Time {
+ return v.Credentials.Expires
+}
+
+// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver.
+type CredentialsProviderAdapter struct {
+ Provider aws.CredentialsProvider
+}
+
+var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil)
+
+// GetIdentity retrieves AWS credentials using the underlying provider.
+func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+ auth.Identity, error,
+) {
+ if v.Provider == nil {
+ return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil
+ }
+
+ creds, err := v.Provider.Retrieve(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get credentials: %w", err)
+ }
+
+ return &CredentialsAdapter{Credentials: creds}, nil
+}
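The credentials adapter mirrors the bearer one; a sketch using aws.CredentialsProviderFunc, including the nil-provider fallback to empty (anonymous) credentials:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	smithyauth "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
	"github.com/aws/smithy-go"
)

func main() {
	resolver := &smithyauth.CredentialsProviderAdapter{
		Provider: aws.CredentialsProviderFunc(func(ctx context.Context) (aws.Credentials, error) {
			return aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
		}),
	}

	identity, err := resolver.GetIdentity(context.Background(), smithy.Properties{})
	if err != nil {
		panic(err)
	}
	fmt.Println(identity.(*smithyauth.CredentialsAdapter).Credentials.AccessKeyID) // AKID

	// With no provider configured, anonymous (zero-value) credentials come back.
	anon := &smithyauth.CredentialsProviderAdapter{}
	identity, _ = anon.GetIdentity(context.Background(), smithy.Properties{})
	fmt.Println(identity.(*smithyauth.CredentialsAdapter).Credentials.HasKeys()) // false
}
```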
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go
new file mode 100644
index 000000000..42b458673
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go
@@ -0,0 +1,2 @@
+// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions.
+package smithy
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go
new file mode 100644
index 000000000..24db8e144
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go
@@ -0,0 +1,57 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer.
+type V4SignerAdapter struct {
+ Signer v4.HTTPSigner
+ Logger logging.Logger
+ LogSigning bool
+}
+
+var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil)
+
+// SignRequest signs the request with the provided identity.
+func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error {
+ ca, ok := identity.(*CredentialsAdapter)
+ if !ok {
+ return fmt.Errorf("unexpected identity type: %T", identity)
+ }
+
+ name, ok := smithyhttp.GetSigV4SigningName(&props)
+ if !ok {
+ return fmt.Errorf("sigv4 signing name is required")
+ }
+
+ region, ok := smithyhttp.GetSigV4SigningRegion(&props)
+ if !ok {
+ return fmt.Errorf("sigv4 signing region is required")
+ }
+
+ hash := v4.GetPayloadHash(ctx)
+ signingTime := sdk.NowTime()
+ skew := internalcontext.GetAttemptSkewContext(ctx)
+ signingTime = signingTime.Add(skew)
+ err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) {
+ o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
+
+ o.Logger = v.Logger
+ o.LogSigning = v.LogSigning
+ })
+ if err != nil {
+ return fmt.Errorf("sign http: %w", err)
+ }
+
+ return nil
+}
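A sketch of invoking the SigV4 adapter directly, setting the signing name and region by hand through smithy-go's property setters. In a real client these arrive via the resolved auth scheme, and the payload-hash and clock-skew middleware have already populated the context, so the output here is structurally correct but not a valid signature:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	smithyauth "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
	"github.com/aws/smithy-go"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	req := smithyhttp.NewStackRequest().(*smithyhttp.Request)
	req.URL.Scheme = "https"
	req.URL.Host = "s3.us-east-1.amazonaws.com"

	props := smithy.Properties{}
	smithyhttp.SetSigV4SigningName(&props, "s3")
	smithyhttp.SetSigV4SigningRegion(&props, "us-east-1")

	signer := &smithyauth.V4SignerAdapter{Signer: v4.NewSigner()}
	identity := &smithyauth.CredentialsAdapter{
		Credentials: aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"},
	}

	if err := signer.SignRequest(context.Background(), req, identity, props); err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization")) // AWS4-HMAC-SHA256 Credential=AKID/...
}
```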
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
new file mode 100644
index 000000000..938cd14c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
@@ -0,0 +1,112 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields that
+// are assignable and exist in both structs; all other fields are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will panic.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ if dst.Kind() == reflect.String {
+ dst.SetString(e.String())
+ } else {
+ dst.Set(reflect.New(e))
+ }
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if dst.Kind() != reflect.String && src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
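Copy's semantics are easiest to see on a small struct: pointers and maps in the destination are freshly allocated, so mutating the copy leaves the source intact. A runnable sketch (the input type is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

type input struct {
	Name *string
	Tags map[string]string
}

func main() {
	name := "original"
	src := &input{Name: &name, Tags: map[string]string{"env": "dev"}}

	dst := awsutil.CopyOf(src).(*input)
	*dst.Name = "copy"       // the pointer target was reallocated; src is unaffected
	dst.Tags["env"] = "prod" // the map was reallocated too

	fmt.Println(*src.Name, src.Tags["env"]) // original dev
	fmt.Println(*dst.Name, dst.Tags["env"]) // copy prod
}
```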
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
new file mode 100644
index 000000000..bcfe51a2b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
@@ -0,0 +1,33 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual reports whether the two values are deeply equal, like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil and of the same type, they are equal.
+		// If they are of different types, they are not equal.
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ // Special casing for strings as typed enumerations are string aliases
+ // but are not deep equal.
+ if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
+ return ra.String() == rb.String()
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
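A few illustrative cases of the indirection and string-kind handling (status is a hypothetical string alias standing in for a typed enum):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

type status string

func main() {
	s := "hello"
	fmt.Println(awsutil.DeepEqual(s, &s))              // true: one level of indirection is removed
	fmt.Println(awsutil.DeepEqual("ok", status("ok"))) // true: string kinds compare by value
	fmt.Println(awsutil.DeepEqual(nil, nil))           // true: both invalid, same (nil) type
	fmt.Println(awsutil.DeepEqual(&s, nil))            // false: only one side is valid
}
```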
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
new file mode 100644
index 000000000..1adecae6b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
@@ -0,0 +1,131 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ isPtr := false
+ for v.Kind() == reflect.Ptr {
+ isPtr = true
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+ break
+ }
+
+ if isPtr {
+ buf.WriteRune('&')
+ }
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ if isPtr {
+ buf.WriteRune('&')
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ if isPtr {
+ buf.WriteRune('&')
+ }
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+ return
+ }
+
+ for v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Ptr || v.Kind() == reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice {
+ prettify(v, indent, buf)
+ return
+ }
+
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
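A sketch of the output shape on a hypothetical struct; nil pointer fields are omitted and strings are quoted:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

type bucket struct {
	Name    *string
	Size    int64
	Deleted *bool // left nil: omitted from the output
}

func main() {
	name := "logs"
	fmt.Println(awsutil.Prettify(&bucket{Name: &name, Size: 42}))
	// &{
	//   Name: "logs",
	//   Size: 42
	// }
}
```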
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ for i := 0; i < v.Type().NumField(); i++ {
+ ft := v.Type().Field(i)
+ fv := v.Field(i)
+
+ if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+ continue // ignore unset fields
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(ft.Name + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+ } else {
+ stringValue(fv, indent+2, buf)
+ }
+
+ buf.WriteString(",\n")
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
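StringValue honors the sensitive struct tag so that secret fields render as <sensitive> instead of their values, which is how generated API shapes keep secrets out of logs. A sketch with a hypothetical shape:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)

type loginInput struct {
	Username *string
	Password *string `sensitive:"true"`
}

func main() {
	u, p := "alice", "hunter2"
	out := awsutil.StringValue(&loginInput{Username: &u, Password: &p})
	fmt.Println(out) // Password prints as <sensitive>, never "hunter2"
}
```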
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
new file mode 100644
index 000000000..0981931aa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -0,0 +1,473 @@
+# v1.4.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.4.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.9 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.8 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.7 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.37 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.36 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.35 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.34 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.30 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.3.28 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.27 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.26 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.25 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.24 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.23 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.22 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.21 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.43 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.42 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.41 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.40 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.39 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.38 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.37 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.36 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.35 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.34 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.33 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.32 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.31 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.30 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.29 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.28 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.23 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.22 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.21 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.20 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.19 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.18 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.14 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.13 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.12 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.11 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.10 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.9 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.8 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.7 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.6 (2022-03-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.5 (2022-02-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.4 (2022-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2022-01-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.7 (2021-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.6 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.5 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.4 (2021-08-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.2 (2021-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.1 (2021-07-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2021-06-25)
+
+* **Release**: Release new modules
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go
new file mode 100644
index 000000000..cd4d19b89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go
@@ -0,0 +1,65 @@
+package configsources
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// EnableEndpointDiscoveryProvider is an interface for retrieving an external
+// configuration value for Enable Endpoint Discovery.
+type EnableEndpointDiscoveryProvider interface {
+ GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error)
+}
+
+// ResolveEnableEndpointDiscovery extracts the first instance of an EnableEndpointDiscoveryProvider from the config slice.
+// Additionally returns a boolean to indicate whether the value was found in the provided configs,
+// and an error if one is encountered.
+func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok {
+ value, found, err = p.GetEnableEndpointDiscovery(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint
+type UseDualStackEndpointProvider interface {
+ GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error)
+}
+
+// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpointProvider from the config slice.
+// Additionally returns a boolean to indicate if the value was found in the provided configs, and an error if one is encountered.
+func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(UseDualStackEndpointProvider); ok {
+ value, found, err = p.GetUseDualStackEndpoint(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint
+type UseFIPSEndpointProvider interface {
+ GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error)
+}
+
+// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice.
+// Additionally, returns a boolean to indicate if the value was found in the provided configs, and an error if one is encountered.
+func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(UseFIPSEndpointProvider); ok {
+ value, found, err = p.GetUseFIPSEndpoint(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
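All three resolvers share the same scan-until-found loop over an untyped config slice. A sketch with a hypothetical fipsConfig source; entries that don't implement the interface are simply skipped:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/internal/configsources"
)

// fipsConfig is a hypothetical config source that opts in to FIPS endpoints.
type fipsConfig struct{}

func (fipsConfig) GetUseFIPSEndpoint(context.Context) (aws.FIPSEndpointState, bool, error) {
	return aws.FIPSEndpointStateEnabled, true, nil
}

func main() {
	// The first source reporting found == true wins.
	configs := []interface{}{struct{}{}, fipsConfig{}}

	state, found, err := configsources.ResolveUseFIPSEndpoint(context.Background(), configs)
	fmt.Println(state == aws.FIPSEndpointStateEnabled, found, err) // true true <nil>
}
```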
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
new file mode 100644
index 000000000..e7835f852
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
@@ -0,0 +1,57 @@
+package configsources
+
+import (
+ "context"
+)
+
+// ServiceBaseEndpointProvider is needed to search for all providers
+// that provide a configured service endpoint
+type ServiceBaseEndpointProvider interface {
+ GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error)
+}
+
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to the resulting import cycle.
+type IgnoreConfiguredEndpointsProvider interface {
+ GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to the resulting import cycle.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+ value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources
+// while allowing for configured endpoints to be disabled
+func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) {
+ if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val {
+ return "", false, nil
+ }
+
+ for _, cs := range configs {
+ if p, ok := cs.(ServiceBaseEndpointProvider); ok {
+ value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
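A sketch of ResolveServiceBaseEndpoint with a hypothetical endpointConfig source that pins a single service's endpoint:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/configsources"
)

// endpointConfig is a hypothetical source exposing a base endpoint for one service.
type endpointConfig struct{ url string }

func (c endpointConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
	if sdkID == "S3" {
		return c.url, true, nil
	}
	return "", false, nil
}

func main() {
	configs := []interface{}{endpointConfig{url: "http://localhost:9000"}}

	ep, found, err := configsources.ResolveServiceBaseEndpoint(context.Background(), "S3", configs)
	fmt.Println(ep, found, err) // http://localhost:9000 true <nil>

	// Unknown service IDs fall through with found == false.
	_, found, _ = configsources.ResolveServiceBaseEndpoint(context.Background(), "DynamoDB", configs)
	fmt.Println(found) // false
}
```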
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
new file mode 100644
index 000000000..970e61dee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package configsources
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.4.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
new file mode 100644
index 000000000..f0c283d39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
@@ -0,0 +1,52 @@
+package context
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type s3BackendKey struct{}
+type checksumInputAlgorithmKey struct{}
+type clockSkew struct{}
+
+const (
+ // S3BackendS3Express identifies the S3Express backend
+ S3BackendS3Express = "S3Express"
+)
+
+// SetS3Backend stores the resolved endpoint backend within the request
+// context, which is required for a variety of custom S3 behaviors.
+func SetS3Backend(ctx context.Context, typ string) context.Context {
+ return middleware.WithStackValue(ctx, s3BackendKey{}, typ)
+}
+
+// GetS3Backend retrieves the stored endpoint backend within the context.
+func GetS3Backend(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string)
+ return v
+}
+
+// SetChecksumInputAlgorithm sets the request checksum algorithm on the
+// context.
+func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value)
+}
+
+// GetChecksumInputAlgorithm returns the checksum algorithm from the context.
+func GetChecksumInputAlgorithm(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string)
+ return v
+}
+
+// SetAttemptSkewContext sets the clock skew value on the context
+func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context {
+ return middleware.WithStackValue(ctx, clockSkew{}, v)
+}
+
+// GetAttemptSkewContext gets the clock skew value from the context
+func GetAttemptSkewContext(ctx context.Context) time.Duration {
+ x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration)
+ return x
+}
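These are thin wrappers over smithy-go middleware stack values, so they compose with any context. A sketch of the clock-skew accessors:

```go
package main

import (
	"context"
	"fmt"
	"time"

	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
)

func main() {
	ctx := internalcontext.SetAttemptSkewContext(context.Background(), 250*time.Millisecond)
	fmt.Println(internalcontext.GetAttemptSkewContext(ctx)) // 250ms

	// An untouched context yields the zero value, not an error.
	fmt.Println(internalcontext.GetAttemptSkewContext(context.Background())) // 0s
}
```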
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
new file mode 100644
index 000000000..e6223dd3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
@@ -0,0 +1,94 @@
+package awsrulesfn
+
+import (
+ "strings"
+)
+
+// ARN provides AWS ARN components broken out into a data structure.
+type ARN struct {
+ Partition string
+ Service string
+ Region string
+ AccountId string
+ ResourceId OptionalStringSlice
+}
+
+const (
+ arnDelimiters = ":"
+ resourceDelimiters = "/:"
+ arnSections = 6
+ arnPrefix = "arn:"
+
+ // zero-indexed
+ sectionPartition = 1
+ sectionService = 2
+ sectionRegion = 3
+ sectionAccountID = 4
+ sectionResource = 5
+)
+
+// ParseARN returns an [ARN] value parsed from the input string provided. If
+// the ARN cannot be parsed, nil is returned.
+func ParseARN(input string) *ARN {
+ if !strings.HasPrefix(input, arnPrefix) {
+ return nil
+ }
+
+ sections := strings.SplitN(input, arnDelimiters, arnSections)
+ if numSections := len(sections); numSections != arnSections {
+ return nil
+ }
+
+ if sections[sectionPartition] == "" {
+ return nil
+ }
+ if sections[sectionService] == "" {
+ return nil
+ }
+ if sections[sectionResource] == "" {
+ return nil
+ }
+
+ return &ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountId: sections[sectionAccountID],
+ ResourceId: splitResource(sections[sectionResource]),
+ }
+}
+
+// splitResource splits the resource components by the ARN resource delimiters.
+func splitResource(v string) []string {
+ var parts []string
+ var offset int
+
+ for offset <= len(v) {
+		idx := strings.IndexAny(v[offset:], resourceDelimiters)
+ if idx < 0 {
+ parts = append(parts, v[offset:])
+ break
+ }
+ parts = append(parts, v[offset:idx+offset])
+ offset += idx + 1
+ }
+
+ return parts
+}
+
+// OptionalStringSlice provides a helper to safely get the index of a string
+// slice that may be out of bounds. Returns pointer to string if index is
+// valid. Otherwise returns nil.
+type OptionalStringSlice []string
+
+// Get returns a string pointer of the string at index i if the index is valid.
+// Otherwise returns nil.
+func (s OptionalStringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
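A sketch of ParseARN and the bounds-safe OptionalStringSlice accessor; empty region and account sections are accepted, while an empty partition, service, or resource is not:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
)

func main() {
	arn := awsrulesfn.ParseARN("arn:aws:s3:::my-bucket/path/to/key")
	fmt.Println(arn.Partition, arn.Service)    // aws s3
	fmt.Println(*arn.ResourceId.Get(0))        // my-bucket
	fmt.Println(arn.ResourceId.Get(99) == nil) // true: out-of-range access is safe

	fmt.Println(awsrulesfn.ParseARN("not-an-arn") == nil) // true
}
```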
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
new file mode 100644
index 000000000..d5a365853
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
@@ -0,0 +1,3 @@
+// Package awsrulesfn provides AWS focused endpoint rule functions for
+// evaluating endpoint resolution rules.
+package awsrulesfn
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
new file mode 100644
index 000000000..df72da97c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
@@ -0,0 +1,7 @@
+//go:build codegen
+// +build codegen
+
+package awsrulesfn
+
+//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
new file mode 100644
index 000000000..637e5fc18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
@@ -0,0 +1,51 @@
+package awsrulesfn
+
+import (
+ "net"
+ "strings"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// IsVirtualHostableS3Bucket reports whether the input is a DNS-compatible
+// bucket name that can be used with Amazon S3 virtual hosted style addressing.
+// Similar to [rulesfn.IsValidHostLabel] with the added restrictions that each
+// label must be 3-63 characters long, all lowercase, and not formatted as an
+// IP address.
+func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool {
+ // input should not be formatted as an IP address
+ // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but
+ // validation further down will catch that anyway (it's guaranteed to have
+ // unfriendly characters % and : if that's the case)
+ if net.ParseIP(input) != nil {
+ return false
+ }
+
+ var labels []string
+ if allowSubDomains {
+ labels = strings.Split(input, ".")
+ } else {
+ labels = []string{input}
+ }
+
+ for _, label := range labels {
+ // validate special length constraints
+ if l := len(label); l < 3 || l > 63 {
+ return false
+ }
+
+ // Validate no capital letters
+ for _, r := range label {
+ if r >= 'A' && r <= 'Z' {
+ return false
+ }
+ }
+
+ // Validate valid host label
+ if !smithyhttp.ValidHostLabel(label) {
+ return false
+ }
+ }
+
+ return true
+}
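A few illustrative calls showing which inputs pass the checks:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
)

func main() {
	fmt.Println(awsrulesfn.IsVirtualHostableS3Bucket("my-bucket", false))   // true
	fmt.Println(awsrulesfn.IsVirtualHostableS3Bucket("My-Bucket", false))   // false: uppercase
	fmt.Println(awsrulesfn.IsVirtualHostableS3Bucket("ab", false))          // false: shorter than 3 characters
	fmt.Println(awsrulesfn.IsVirtualHostableS3Bucket("192.168.0.1", false)) // false: parses as an IP address
	fmt.Println(awsrulesfn.IsVirtualHostableS3Bucket("logs.my-app", true))  // true: subdomains allowed
	fmt.Println(awsrulesfn.IsVirtualHostableS3Bucket("logs.my-app", false)) // false: "." is not a label character
}
```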
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
new file mode 100644
index 000000000..91414afe8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
@@ -0,0 +1,76 @@
+package awsrulesfn
+
+import "regexp"
+
+// Partition provides the metadata describing an AWS partition.
+type Partition struct {
+ ID string `json:"id"`
+ Regions map[string]RegionOverrides `json:"regions"`
+ RegionRegex string `json:"regionRegex"`
+ DefaultConfig PartitionConfig `json:"outputs"`
+}
+
+// PartitionConfig provides the endpoint metadata for an AWS region or partition.
+type PartitionConfig struct {
+ Name string `json:"name"`
+ DnsSuffix string `json:"dnsSuffix"`
+ DualStackDnsSuffix string `json:"dualStackDnsSuffix"`
+ SupportsFIPS bool `json:"supportsFIPS"`
+ SupportsDualStack bool `json:"supportsDualStack"`
+ ImplicitGlobalRegion string `json:"implicitGlobalRegion"`
+}
+
+type RegionOverrides struct {
+ Name *string `json:"name"`
+ DnsSuffix *string `json:"dnsSuffix"`
+ DualStackDnsSuffix *string `json:"dualStackDnsSuffix"`
+ SupportsFIPS *bool `json:"supportsFIPS"`
+ SupportsDualStack *bool `json:"supportsDualStack"`
+}
+
+const defaultPartition = "aws"
+
+func getPartition(partitions []Partition, region string) *PartitionConfig {
+ for _, partition := range partitions {
+ if v, ok := partition.Regions[region]; ok {
+ p := mergeOverrides(partition.DefaultConfig, v)
+ return &p
+ }
+ }
+
+ for _, partition := range partitions {
+ regionRegex := regexp.MustCompile(partition.RegionRegex)
+ if regionRegex.MatchString(region) {
+ v := partition.DefaultConfig
+ return &v
+ }
+ }
+
+ for _, partition := range partitions {
+ if partition.ID == defaultPartition {
+ v := partition.DefaultConfig
+ return &v
+ }
+ }
+
+ return nil
+}
+
+func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig {
+ if from.Name != nil {
+ into.Name = *from.Name
+ }
+ if from.DnsSuffix != nil {
+ into.DnsSuffix = *from.DnsSuffix
+ }
+ if from.DualStackDnsSuffix != nil {
+ into.DualStackDnsSuffix = *from.DualStackDnsSuffix
+ }
+ if from.SupportsFIPS != nil {
+ into.SupportsFIPS = *from.SupportsFIPS
+ }
+ if from.SupportsDualStack != nil {
+ into.SupportsDualStack = *from.SupportsDualStack
+ }
+ return into
+}
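An in-package sketch (these identifiers are unexported) of getPartition's precedence: an explicit region entry wins and is merged with the partition defaults, then a regex match, then the default "aws" partition:

```go
package awsrulesfn

import "fmt"

func examplePartitionPrecedence() {
	suffix := "override.example"
	parts := []Partition{{
		ID:            "aws",
		RegionRegex:   `^(us|eu)-\w+-\d+$`,
		DefaultConfig: PartitionConfig{Name: "aws", DnsSuffix: "amazonaws.com"},
		Regions: map[string]RegionOverrides{
			"us-east-1": {DnsSuffix: &suffix},
		},
	}}

	fmt.Println(getPartition(parts, "us-east-1").DnsSuffix)  // override.example: explicit entry + mergeOverrides
	fmt.Println(getPartition(parts, "eu-west-9").DnsSuffix)  // amazonaws.com: regex match
	fmt.Println(getPartition(parts, "cn-north-1").DnsSuffix) // amazonaws.com: fallback to the "aws" partition
}
```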
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
new file mode 100644
index 000000000..6ab4d9669
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
@@ -0,0 +1,496 @@
+// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT.
+
+package awsrulesfn
+
+// GetPartition returns an AWS [Partition] for the region provided. If the
+// partition cannot be determined then the default partition (AWS commercial)
+// will be returned.
+func GetPartition(region string) *PartitionConfig {
+ return getPartition(partitions, region)
+}
+
+var partitions = []Partition{
+ {
+ ID: "aws",
+ RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws",
+ DnsSuffix: "amazonaws.com",
+ DualStackDnsSuffix: "api.aws",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "af-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-east-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-northeast-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-northeast-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-northeast-3": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-south-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-3": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-4": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-5": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-6": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-7": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "aws-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ca-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ca-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-central-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-north-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-south-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-west-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-west-3": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "il-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "me-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "me-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "mx-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "sa-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-east-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-west-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ RegionRegex: "^cn\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-cn",
+ DnsSuffix: "amazonaws.com.cn",
+ DualStackDnsSuffix: "api.amazonwebservices.com.cn",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "cn-northwest-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-cn-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "cn-north-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "cn-northwest-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-eusc",
+ RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-eusc",
+ DnsSuffix: "amazonaws.eu",
+ DualStackDnsSuffix: "api.amazonwebservices.eu",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "eusc-de-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "eusc-de-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso",
+ RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso",
+ DnsSuffix: "c2s.ic.gov",
+ DualStackDnsSuffix: "api.aws.ic.gov",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-iso-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-iso-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-iso-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-b",
+ DnsSuffix: "sc2s.sgov.gov",
+ DualStackDnsSuffix: "api.aws.scloud",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-isob-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-b-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isob-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isob-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso-e",
+ RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-e",
+ DnsSuffix: "cloud.adc-e.uk",
+ DualStackDnsSuffix: "api.cloud-aws.adc-e.uk",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "eu-isoe-west-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-e-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-isoe-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso-f",
+ RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-f",
+ DnsSuffix: "csp.hci.ic.gov",
+ DualStackDnsSuffix: "api.aws.hci.ic.gov",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-isof-south-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-f-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isof-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isof-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-us-gov",
+ DnsSuffix: "amazonaws.com",
+ DualStackDnsSuffix: "api.aws",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-gov-west-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-us-gov-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-gov-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-gov-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+}
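
For orientation, a minimal sketch of how this generated table is typically consulted: an exact region key wins, then each partition's RegionRegex is tried, then the first ("aws") entry serves as a fallback. The helper name is hypothetical, the enclosing variable is assumed to be named partitions, regexp is assumed to be imported, and per-region overrides (all nil above) are ignored; the vendored package ships its own accessor.

// lookupPartition is a hypothetical sketch, not part of the vendored file.
// It mirrors the field names used in the generated literals above.
func lookupPartition(region string) *PartitionConfig {
	// An exact region key takes priority over pattern matches.
	for i := range partitions {
		if _, ok := partitions[i].Regions[region]; ok {
			cfg := partitions[i].DefaultConfig
			return &cfg
		}
	}
	// Otherwise match the region against each partition's regex.
	for i := range partitions {
		if ok, _ := regexp.MatchString(partitions[i].RegionRegex, region); ok {
			cfg := partitions[i].DefaultConfig
			return &cfg
		}
	}
	// Fall back to the first partition ("aws") so callers always
	// receive a usable default configuration.
	cfg := partitions[0].DefaultConfig
	return &cfg
}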
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
new file mode 100644
index 000000000..c789264d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -0,0 +1,267 @@
+{
+ "partitions" : [ {
+ "id" : "aws",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.com",
+ "dualStackDnsSuffix" : "api.aws",
+ "implicitGlobalRegion" : "us-east-1",
+ "name" : "aws",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$",
+ "regions" : {
+ "af-south-1" : {
+ "description" : "Africa (Cape Town)"
+ },
+ "ap-east-1" : {
+ "description" : "Asia Pacific (Hong Kong)"
+ },
+ "ap-east-2" : {
+ "description" : "Asia Pacific (Taipei)"
+ },
+ "ap-northeast-1" : {
+ "description" : "Asia Pacific (Tokyo)"
+ },
+ "ap-northeast-2" : {
+ "description" : "Asia Pacific (Seoul)"
+ },
+ "ap-northeast-3" : {
+ "description" : "Asia Pacific (Osaka)"
+ },
+ "ap-south-1" : {
+ "description" : "Asia Pacific (Mumbai)"
+ },
+ "ap-south-2" : {
+ "description" : "Asia Pacific (Hyderabad)"
+ },
+ "ap-southeast-1" : {
+ "description" : "Asia Pacific (Singapore)"
+ },
+ "ap-southeast-2" : {
+ "description" : "Asia Pacific (Sydney)"
+ },
+ "ap-southeast-3" : {
+ "description" : "Asia Pacific (Jakarta)"
+ },
+ "ap-southeast-4" : {
+ "description" : "Asia Pacific (Melbourne)"
+ },
+ "ap-southeast-5" : {
+ "description" : "Asia Pacific (Malaysia)"
+ },
+ "ap-southeast-6" : {
+ "description" : "Asia Pacific (New Zealand)"
+ },
+ "ap-southeast-7" : {
+ "description" : "Asia Pacific (Thailand)"
+ },
+ "aws-global" : {
+ "description" : "aws global region"
+ },
+ "ca-central-1" : {
+ "description" : "Canada (Central)"
+ },
+ "ca-west-1" : {
+ "description" : "Canada West (Calgary)"
+ },
+ "eu-central-1" : {
+ "description" : "Europe (Frankfurt)"
+ },
+ "eu-central-2" : {
+ "description" : "Europe (Zurich)"
+ },
+ "eu-north-1" : {
+ "description" : "Europe (Stockholm)"
+ },
+ "eu-south-1" : {
+ "description" : "Europe (Milan)"
+ },
+ "eu-south-2" : {
+ "description" : "Europe (Spain)"
+ },
+ "eu-west-1" : {
+ "description" : "Europe (Ireland)"
+ },
+ "eu-west-2" : {
+ "description" : "Europe (London)"
+ },
+ "eu-west-3" : {
+ "description" : "Europe (Paris)"
+ },
+ "il-central-1" : {
+ "description" : "Israel (Tel Aviv)"
+ },
+ "me-central-1" : {
+ "description" : "Middle East (UAE)"
+ },
+ "me-south-1" : {
+ "description" : "Middle East (Bahrain)"
+ },
+ "mx-central-1" : {
+ "description" : "Mexico (Central)"
+ },
+ "sa-east-1" : {
+ "description" : "South America (Sao Paulo)"
+ },
+ "us-east-1" : {
+ "description" : "US East (N. Virginia)"
+ },
+ "us-east-2" : {
+ "description" : "US East (Ohio)"
+ },
+ "us-west-1" : {
+ "description" : "US West (N. California)"
+ },
+ "us-west-2" : {
+ "description" : "US West (Oregon)"
+ }
+ }
+ }, {
+ "id" : "aws-cn",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.com.cn",
+ "dualStackDnsSuffix" : "api.amazonwebservices.com.cn",
+ "implicitGlobalRegion" : "cn-northwest-1",
+ "name" : "aws-cn",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^cn\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-cn-global" : {
+ "description" : "aws-cn global region"
+ },
+ "cn-north-1" : {
+ "description" : "China (Beijing)"
+ },
+ "cn-northwest-1" : {
+ "description" : "China (Ningxia)"
+ }
+ }
+ }, {
+ "id" : "aws-eusc",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.eu",
+ "dualStackDnsSuffix" : "api.amazonwebservices.eu",
+ "implicitGlobalRegion" : "eusc-de-east-1",
+ "name" : "aws-eusc",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$",
+ "regions" : {
+ "eusc-de-east-1" : {
+ "description" : "EU (Germany)"
+ }
+ }
+ }, {
+ "id" : "aws-iso",
+ "outputs" : {
+ "dnsSuffix" : "c2s.ic.gov",
+ "dualStackDnsSuffix" : "api.aws.ic.gov",
+ "implicitGlobalRegion" : "us-iso-east-1",
+ "name" : "aws-iso",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-global" : {
+ "description" : "aws-iso global region"
+ },
+ "us-iso-east-1" : {
+ "description" : "US ISO East"
+ },
+ "us-iso-west-1" : {
+ "description" : "US ISO WEST"
+ }
+ }
+ }, {
+ "id" : "aws-iso-b",
+ "outputs" : {
+ "dnsSuffix" : "sc2s.sgov.gov",
+ "dualStackDnsSuffix" : "api.aws.scloud",
+ "implicitGlobalRegion" : "us-isob-east-1",
+ "name" : "aws-iso-b",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-b-global" : {
+ "description" : "aws-iso-b global region"
+ },
+ "us-isob-east-1" : {
+ "description" : "US ISOB East (Ohio)"
+ },
+ "us-isob-west-1" : {
+ "description" : "US ISOB West"
+ }
+ }
+ }, {
+ "id" : "aws-iso-e",
+ "outputs" : {
+ "dnsSuffix" : "cloud.adc-e.uk",
+ "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk",
+ "implicitGlobalRegion" : "eu-isoe-west-1",
+ "name" : "aws-iso-e",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-e-global" : {
+ "description" : "aws-iso-e global region"
+ },
+ "eu-isoe-west-1" : {
+ "description" : "EU ISOE West"
+ }
+ }
+ }, {
+ "id" : "aws-iso-f",
+ "outputs" : {
+ "dnsSuffix" : "csp.hci.ic.gov",
+ "dualStackDnsSuffix" : "api.aws.hci.ic.gov",
+ "implicitGlobalRegion" : "us-isof-south-1",
+ "name" : "aws-iso-f",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-f-global" : {
+ "description" : "aws-iso-f global region"
+ },
+ "us-isof-east-1" : {
+ "description" : "US ISOF EAST"
+ },
+ "us-isof-south-1" : {
+ "description" : "US ISOF SOUTH"
+ }
+ }
+ }, {
+ "id" : "aws-us-gov",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.com",
+ "dualStackDnsSuffix" : "api.aws",
+ "implicitGlobalRegion" : "us-gov-west-1",
+ "name" : "aws-us-gov",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-us-gov-global" : {
+ "description" : "aws-us-gov global region"
+ },
+ "us-gov-east-1" : {
+ "description" : "AWS GovCloud (US-East)"
+ },
+ "us-gov-west-1" : {
+ "description" : "AWS GovCloud (US-West)"
+ }
+ }
+ } ],
+ "version" : "1.1"
+}
\ No newline at end of file
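
This JSON document is the model from which the Go partition table earlier in this diff is code-generated. A self-contained sketch of decoding it directly, with struct shapes inferred from the document and names hypothetical:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// partitionsDoc mirrors the top-level shape of partitions.json.
type partitionsDoc struct {
	Partitions []struct {
		ID          string         `json:"id"`
		RegionRegex string         `json:"regionRegex"`
		Outputs     map[string]any `json:"outputs"`
		Regions     map[string]struct {
			Description string `json:"description"`
		} `json:"regions"`
	} `json:"partitions"`
	Version string `json:"version"`
}

func main() {
	raw, err := os.ReadFile("partitions.json")
	if err != nil {
		panic(err)
	}
	var doc partitionsDoc
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	// Print one summary line per partition, e.g. "aws: 35 regions".
	for _, p := range doc.Partitions {
		fmt.Printf("%s: %d regions (regex %s)\n", p.ID, len(p.Regions), p.RegionRegex)
	}
}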
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..67950ca36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
@@ -0,0 +1,201 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+ // Disable usage of HTTPS (TLS / SSL)
+ DisableHTTPS bool
+}
+
+// Partitions is a slice of Partition.
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+ if len(ps) == 0 {
+ return aws.Endpoint{}, fmt.Errorf("no partitions found")
+ }
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(region) {
+ continue
+ }
+
+ return ps[i].ResolveEndpoint(region, opts)
+ }
+
+	// Fall back to the first partition's format when no partition matches the region.
+ return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its region endpoints.
+type Partition struct {
+ ID string
+ RegionRegex *regexp.Regexp
+ PartitionEndpoint string
+ IsRegionalized bool
+ Defaults Endpoint
+ Endpoints Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string) bool {
+ _, ok := p.Endpoints[region]
+ return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
+ if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
+ region = p.PartitionEndpoint
+ }
+
+ e, _ := p.endpointForRegion(region)
+
+ return e.resolve(p.ID, region, p.Defaults, options), nil
+}
+
+func (p Partition) endpointForRegion(region string) (Endpoint, bool) {
+ if e, ok := p.Endpoints[region]; ok {
+ return e, true
+ }
+
+ if !p.IsRegionalized {
+ return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint
+ }
+
+	// No matching endpoint was found; return a blank Endpoint
+	// that will be used for generic endpoint creation.
+ return Endpoint{}, false
+}
+
+// Endpoints is a map of service config regions to endpoints
+type Endpoints map[string]Endpoint
+
+// CredentialScope is the credential scope of a region and service
+type CredentialScope struct {
+ Region string
+ Service string
+}
+
+// Endpoint is a service endpoint description
+type Endpoint struct {
+ // True if the endpoint cannot be resolved for this partition/region/service
+ Unresolveable aws.Ternary
+
+ Hostname string
+ Protocols []string
+
+ CredentialScope CredentialScope
+
+ SignatureVersions []string `json:"signatureVersions"`
+}
+
+func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint {
+ var merged Endpoint
+ merged.mergeIn(def)
+ merged.mergeIn(e)
+ e = merged
+
+ var u string
+ if e.Unresolveable != aws.TrueTernary {
+ // Only attempt to resolve the endpoint if it can be resolved.
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1)
+
+ scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
+ u = scheme + "://" + hostname
+ }
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+
+ return aws.Endpoint{
+ URL: u,
+ PartitionID: partition,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+func (e *Endpoint) mergeIn(other Endpoint) {
+ if other.Unresolveable != aws.UnknownTernary {
+ e.Unresolveable = other.Unresolveable
+ }
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+}
+
+func getEndpointScheme(protocols []string, disableHTTPS bool) string {
+ if disableHTTPS {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
+// MapFIPSRegion extracts the intrinsic AWS region from one that may have an
+// embedded FIPS microformat.
+func MapFIPSRegion(region string) string {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(region, fipsInfix) ||
+ strings.Contains(region, fipsPrefix) ||
+ strings.Contains(region, fipsSuffix) {
+ region = strings.ReplaceAll(region, fipsInfix, "-")
+ region = strings.ReplaceAll(region, fipsPrefix, "")
+ region = strings.ReplaceAll(region, fipsSuffix, "")
+ }
+
+ return region
+}
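
Because this package lives under internal/, it cannot be imported from outside the aws-sdk-go-v2 module, so the following test-style snippet (not part of the vendored file; fmt assumed imported in the enclosing _test file) would sit in the same package. It illustrates how MapFIPSRegion strips each placement of the FIPS microformat:

// ExampleMapFIPSRegion is an illustrative testable example.
func ExampleMapFIPSRegion() {
	fmt.Println(MapFIPSRegion("fips-us-east-1")) // prefix form
	fmt.Println(MapFIPSRegion("us-east-1-fips")) // suffix form
	fmt.Println(MapFIPSRegion("us-east-1"))      // no microformat: unchanged
	// Output:
	// us-east-1
	// us-east-1
	// us-east-1
}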
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
new file mode 100644
index 000000000..9c3aafe1d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -0,0 +1,448 @@
+# v2.7.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v2.7.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.9 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.8 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.7 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.37 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.36 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.35 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.34 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.33 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.32 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.31 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.30 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v2.6.28 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.27 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.26 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.25 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.24 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.23 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.22 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.21 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.20 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.19 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.18 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.16 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.3.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.2.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.1.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.0.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.0.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.0.0 (2021-11-06)
+
+* **Release**: Endpoint Variant Model Support
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go
new file mode 100644
index 000000000..32251a7e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go
@@ -0,0 +1,302 @@
+package endpoints
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go/logging"
+)
+
+// DefaultKey is a compound map key of a variant and other values.
+type DefaultKey struct {
+ Variant EndpointVariant
+ ServiceVariant ServiceVariant
+}
+
+// EndpointKey is a compound map key of a region and associated variant value.
+type EndpointKey struct {
+ Region string
+ Variant EndpointVariant
+ ServiceVariant ServiceVariant
+}
+
+// EndpointVariant is a bit field describing an endpoint's attributes.
+type EndpointVariant uint64
+
+const (
+ // FIPSVariant indicates that the endpoint is FIPS capable.
+ FIPSVariant EndpointVariant = 1 << (64 - 1 - iota)
+
+ // DualStackVariant indicates that the endpoint is DualStack capable.
+ DualStackVariant
+)
+
+// ServiceVariant is a bit field describing a service endpoint's attributes.
+type ServiceVariant uint64
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "s3v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger.
+ LogDeprecated bool
+
+ // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+ // over the region name passed to the ResolveEndpoint call.
+ ResolvedRegion string
+
+ // Disable usage of HTTPS (TLS / SSL)
+ DisableHTTPS bool
+
+ // Instruct the resolver to use a service endpoint that supports dual-stack.
+ // If a service does not have a dual-stack endpoint an error will be returned by the resolver.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Instruct the resolver to use a service endpoint that supports FIPS.
+ // If a service does not have a FIPS endpoint an error will be returned by the resolver.
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // ServiceVariant is a bitfield of service specified endpoint variant data.
+ ServiceVariant ServiceVariant
+}
+
+// GetEndpointVariant returns the EndpointVariant derived from the options' FIPS and dual-stack settings.
+func (o Options) GetEndpointVariant() (v EndpointVariant) {
+ if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+ v |= DualStackVariant
+ }
+ if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled {
+ v |= FIPSVariant
+ }
+ return v
+}
+
+// Partitions is a slice of Partition.
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+ if len(ps) == 0 {
+ return aws.Endpoint{}, fmt.Errorf("no partitions found")
+ }
+
+ if opts.Logger == nil {
+ opts.Logger = logging.Nop{}
+ }
+
+ if len(opts.ResolvedRegion) > 0 {
+ region = opts.ResolvedRegion
+ }
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(region, opts) {
+ continue
+ }
+
+ return ps[i].ResolveEndpoint(region, opts)
+ }
+
+	// Fall back to the first partition's format when no partition matches the region.
+ return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its region endpoints.
+type Partition struct {
+ ID string
+ RegionRegex *regexp.Regexp
+ PartitionEndpoint string
+ IsRegionalized bool
+ Defaults map[DefaultKey]Endpoint
+ Endpoints Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string, opts Options) bool {
+ _, ok := p.Endpoints[EndpointKey{
+ Region: region,
+ Variant: opts.GetEndpointVariant(),
+ }]
+ return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
+ if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
+ region = p.PartitionEndpoint
+ }
+
+ endpoints := p.Endpoints
+
+ variant := options.GetEndpointVariant()
+ serviceVariant := options.ServiceVariant
+
+ defaults := p.Defaults[DefaultKey{
+ Variant: variant,
+ ServiceVariant: serviceVariant,
+ }]
+
+ return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options)
+}
+
+func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint {
+ key := EndpointKey{
+ Region: region,
+ Variant: variant,
+ }
+
+ if e, ok := endpoints[key]; ok {
+ return e
+ }
+
+ if !p.IsRegionalized {
+ return endpoints[EndpointKey{
+ Region: p.PartitionEndpoint,
+ Variant: variant,
+ ServiceVariant: serviceVariant,
+ }]
+ }
+
+	// No matching endpoint was found; return a blank Endpoint
+	// that will be used for generic endpoint creation.
+ return Endpoint{}
+}
+
+// Endpoints is a map of service config regions to endpoints
+type Endpoints map[EndpointKey]Endpoint
+
+// CredentialScope is the credential scope of a region and service
+type CredentialScope struct {
+ Region string
+ Service string
+}
+
+// Endpoint is a service endpoint description
+type Endpoint struct {
+ // True if the endpoint cannot be resolved for this partition/region/service
+ Unresolveable aws.Ternary
+
+ Hostname string
+ Protocols []string
+
+ CredentialScope CredentialScope
+
+ SignatureVersions []string
+
+ // Indicates that this endpoint is deprecated.
+ Deprecated aws.Ternary
+}
+
+// IsZero returns whether the endpoint structure is an empty (zero) value.
+func (e Endpoint) IsZero() bool {
+ switch {
+ case e.Unresolveable != aws.UnknownTernary:
+ return false
+ case len(e.Hostname) != 0:
+ return false
+ case len(e.Protocols) != 0:
+ return false
+ case e.CredentialScope != (CredentialScope{}):
+ return false
+ case len(e.SignatureVersions) != 0:
+ return false
+ }
+ return true
+}
+
+func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) {
+ var merged Endpoint
+ merged.mergeIn(def)
+ merged.mergeIn(e)
+ e = merged
+
+ if e.IsZero() {
+ return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region)
+ }
+
+ var u string
+ if e.Unresolveable != aws.TrueTernary {
+ // Only attempt to resolve the endpoint if it can be resolved.
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1)
+
+ scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
+ u = scheme + "://" + hostname
+ }
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+
+ if e.Deprecated == aws.TrueTernary && options.LogDeprecated {
+ options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u)
+ }
+
+ return aws.Endpoint{
+ URL: u,
+ PartitionID: partition,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }, nil
+}
+
+func (e *Endpoint) mergeIn(other Endpoint) {
+ if other.Unresolveable != aws.UnknownTernary {
+ e.Unresolveable = other.Unresolveable
+ }
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if other.Deprecated != aws.UnknownTernary {
+ e.Deprecated = other.Deprecated
+ }
+}
+
+func getEndpointScheme(protocols []string, disableHTTPS bool) string {
+ if disableHTTPS {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
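
To make the variant mechanics concrete: enabling FIPS and dual-stack each set one bit in the EndpointVariant, and the combined value participates in every Defaults and Endpoints map lookup, so variant hostnames come from generated data rather than string manipulation. An illustrative sketch in the same package, with a hypothetical function name:

// variantLookupSketch is a hypothetical illustration, not part of the
// vendored file, of how the bit-field variants select endpoint entries.
func variantLookupSketch(p Partition, region string) (aws.Endpoint, error) {
	opts := Options{
		UseFIPSEndpoint:      aws.FIPSEndpointStateEnabled,
		UseDualStackEndpoint: aws.DualStackEndpointStateEnabled,
	}
	// Both bits are set here: FIPSVariant | DualStackVariant.
	v := opts.GetEndpointVariant()
	// The same variant bits key both the defaults and the per-region
	// endpoint entries generated for each service.
	_ = DefaultKey{Variant: v}
	_ = EndpointKey{Region: region, Variant: v}
	return p.ResolveEndpoint(region, opts)
}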
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
new file mode 100644
index 000000000..9675feb41
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package endpoints
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "2.7.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
new file mode 100644
index 000000000..4791d328c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
@@ -0,0 +1,287 @@
+# v1.8.4 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+
+# v1.8.3 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+
+# v1.8.2 (2025-01-24)
+
+* **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir
+
+# v1.8.1 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+
+# v1.8.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.7.3 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+
+# v1.7.2 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+
+# v1.7.1 (2023-11-16)
+
+* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it.
+
+# v1.7.0 (2023-11-13)
+
+* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
+
+# v1.6.0 (2023-11-09.2)
+
+* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
+
+# v1.5.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2023-11-07)
+
+* **Bug Fix**: Fix subproperty performance regression
+
+# v1.5.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.45 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.44 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.43 (2023-09-22)
+
+* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
+* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
+
+# v1.3.42 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.41 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.40 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.39 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.38 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.37 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.36 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.35 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.34 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.30 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.29 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.28 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.27 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.26 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.25 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.24 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.23 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.22 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.21 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2022-05-17)
+
+* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2022-03-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2022-02-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2022-01-28)
+
+* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug.
+
+# v1.3.4 (2022-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2022-01-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2021-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2021-08-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-07-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-07-01)
+
+* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values.
+
+# v1.0.1 (2021-06-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2021-05-20)
+
+* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go
new file mode 100644
index 000000000..0f278d55e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go
@@ -0,0 +1,22 @@
+package ini
+
+import "fmt"
+
+// UnableToReadFile is an error indicating that an ini file could not be read
+type UnableToReadFile struct {
+ Err error
+}
+
+// Error returns an error message and the underlying error message if present
+func (e *UnableToReadFile) Error() string {
+ base := "unable to read file"
+ if e.Err == nil {
+ return base
+ }
+ return fmt.Sprintf("%s: %v", base, e.Err)
+}
+
+// Unwrap returns the underlying error
+func (e *UnableToReadFile) Unwrap() error {
+ return e.Err
+}
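
Reviewer note: a minimal sketch of how this wrapped error surfaces through the standard errors package (hypothetical caller; the internal import path is reachable only from inside the aws-sdk-go-v2 module):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	_, err := ini.OpenFile("/nonexistent/credentials")
	var urf *ini.UnableToReadFile
	if errors.As(err, &urf) {
		// Unwrap exposes the underlying os.Open error for inspection.
		fmt.Println("could not read shared config:", urf.Unwrap())
	}
}
```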
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
new file mode 100644
index 000000000..f94970e77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package ini
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.8.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
new file mode 100644
index 000000000..cefcce91e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
@@ -0,0 +1,56 @@
+// Package ini implements parsing of the AWS shared config file.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+package ini
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// OpenFile parses shared config from the given file path.
+func OpenFile(path string) (sections Sections, err error) {
+ f, oerr := os.Open(path)
+ if oerr != nil {
+ return Sections{}, &UnableToReadFile{Err: oerr}
+ }
+
+ defer func() {
+ closeErr := f.Close()
+ if err == nil {
+ err = closeErr
+ } else if closeErr != nil {
+ err = fmt.Errorf("close error: %v, original error: %w", closeErr, err)
+ }
+ }()
+
+ return Parse(f, path)
+}
+
+// Parse parses shared config from the given reader.
+func Parse(r io.Reader, path string) (Sections, error) {
+ contents, err := io.ReadAll(r)
+ if err != nil {
+ return Sections{}, fmt.Errorf("read all: %v", err)
+ }
+
+ lines := strings.Split(string(contents), "\n")
+ tokens, err := tokenize(lines)
+ if err != nil {
+ return Sections{}, fmt.Errorf("tokenize: %v", err)
+ }
+
+ return parse(tokens, path), nil
+}
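
Reviewer note: a small usage sketch of Parse and the exported Sections accessors, assuming in-module access. Keys are lowercased and "[profile NAME]" headers keep their type prefix, so the lookup key here is "profile dev":

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	src := "[profile dev]\n" +
		"region = us-west-2\n" +
		"s3 =\n" +
		"  max_concurrent_requests = 20\n"
	sections, err := ini.Parse(strings.NewReader(src), "in-memory")
	if err != nil {
		panic(err)
	}
	section, ok := sections.GetSection("profile dev")
	if !ok {
		panic("profile not found")
	}
	fmt.Println(section.String("region")) // us-west-2
	fmt.Println(section.Map("s3"))        // map[max_concurrent_requests:20]
}
```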
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go
new file mode 100644
index 000000000..2422d9046
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go
@@ -0,0 +1,109 @@
+package ini
+
+import (
+ "fmt"
+ "strings"
+)
+
+func parse(tokens []lineToken, path string) Sections {
+ parser := &parser{
+ path: path,
+ sections: NewSections(),
+ }
+ parser.parse(tokens)
+ return parser.sections
+}
+
+type parser struct {
+ csection, ckey string // current state
+ path string // source file path
+ sections Sections // parse result
+}
+
+func (p *parser) parse(tokens []lineToken) {
+ for _, otok := range tokens {
+ switch tok := otok.(type) {
+ case *lineTokenProfile:
+ p.handleProfile(tok)
+ case *lineTokenProperty:
+ p.handleProperty(tok)
+ case *lineTokenSubProperty:
+ p.handleSubProperty(tok)
+ case *lineTokenContinuation:
+ p.handleContinuation(tok)
+ }
+ }
+}
+
+func (p *parser) handleProfile(tok *lineTokenProfile) {
+ name := tok.Name
+ if tok.Type != "" {
+ name = fmt.Sprintf("%s %s", tok.Type, tok.Name)
+ }
+ p.ckey = ""
+ p.csection = name
+ if _, ok := p.sections.container[name]; !ok {
+ p.sections.container[name] = NewSection(name)
+ }
+}
+
+func (p *parser) handleProperty(tok *lineTokenProperty) {
+ if p.csection == "" {
+ return // LEGACY: don't error on "global" properties
+ }
+
+ p.ckey = tok.Key
+ if _, ok := p.sections.container[p.csection].values[tok.Key]; ok {
+ section := p.sections.container[p.csection]
+ section.Logs = append(p.sections.container[p.csection].Logs,
+ fmt.Sprintf(
+ "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n",
+ p.csection, tok.Key, tok.Key, p.path,
+ ),
+ )
+ p.sections.container[p.csection] = section
+ }
+
+ p.sections.container[p.csection].values[tok.Key] = Value{
+ str: tok.Value,
+ }
+ p.sections.container[p.csection].SourceFile[tok.Key] = p.path
+}
+
+func (p *parser) handleSubProperty(tok *lineTokenSubProperty) {
+ if p.csection == "" {
+ return // LEGACY: don't error on "global" properties
+ }
+
+ if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" {
+ // This is an "orphaned" subproperty, either because it's at
+ // the beginning of a section or because the last property's
+ // value isn't empty. Either way we're lenient here and
+ // "promote" this to a normal property.
+ p.handleProperty(&lineTokenProperty{
+ Key: tok.Key,
+ Value: strings.TrimSpace(trimPropertyComment(tok.Value)),
+ })
+ return
+ }
+
+ if p.sections.container[p.csection].values[p.ckey].mp == nil {
+ p.sections.container[p.csection].values[p.ckey] = Value{
+ mp: map[string]string{},
+ }
+ }
+ p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value
+}
+
+func (p *parser) handleContinuation(tok *lineTokenContinuation) {
+ if p.ckey == "" {
+ return
+ }
+
+ value, _ := p.sections.container[p.csection].values[p.ckey]
+ if value.str != "" && value.mp == nil {
+ value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value)
+ }
+
+ p.sections.container[p.csection].values[p.ckey] = value
+}
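
Reviewer note: handleContinuation above only appends when the current property already holds a non-empty string value; a quick illustration of the resulting multi-line value (assuming in-module access):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	// Indented lines that are not "key = value" pairs are continuations
	// and get appended to the previous property with a newline.
	src := "[default]\nnote = first\n second\n third\n"
	sections, err := ini.Parse(strings.NewReader(src), "in-memory")
	if err != nil {
		panic(err)
	}
	s, _ := sections.GetSection("default")
	fmt.Printf("%q\n", s.String("note")) // "first\nsecond\nthird"
}
```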
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go
new file mode 100644
index 000000000..dd89848e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go
@@ -0,0 +1,157 @@
+package ini
+
+import (
+ "sort"
+)
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// NewSections returns an empty set of ini Sections
+func NewSections() Sections {
+ return Sections{
+ container: make(map[string]Section, 0),
+ }
+}
+
+// GetSection returns the section named p. If section p does not exist,
+// false is returned in the second return value.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// HasSection reports whether Sections contains a section with the
+// provided name.
+func (t Sections) HasSection(p string) bool {
+ _, ok := t.container[p]
+ return ok
+}
+
+// SetSection sets a section value for provided section name.
+func (t Sections) SetSection(p string, v Section) Sections {
+ t.container[p] = v
+ return t
+}
+
+// DeleteSection deletes a section entry/value for the provided section name.
+func (t Sections) DeleteSection(p string) {
+ delete(t.container, p)
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+ // Name is the Section profile name
+ Name string
+
+ // values are the values within parsed profile
+ values values
+
+ // Errors is the list of errors
+ Errors []error
+
+ // Logs is the list of logs
+ Logs []string
+
+	// SourceFile is the INI source file from which this section
+	// was retrieved. The key is the property; the value is the
+	// source file the property was retrieved from.
+ SourceFile map[string]string
+}
+
+// NewSection returns an initialized section for the name
+func NewSection(name string) Section {
+ return Section{
+ Name: name,
+ values: values{},
+ SourceFile: map[string]string{},
+ }
+}
+
+// List will return a list of all
+// properties in the section's values
+func (t Section) List() []string {
+ keys := make([]string, len(t.values))
+ i := 0
+ for k := range t.values {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// UpdateSourceFile updates the source file for a property to the provided filepath.
+func (t Section) UpdateSourceFile(property string, filepath string) {
+ t.SourceFile[property] = filepath
+}
+
+// UpdateValue updates the value for the provided key with the provided value
+func (t Section) UpdateValue(k string, v Value) error {
+ t.values[k] = v
+ return nil
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) (bool, bool) {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) (int64, bool) {
+ return t.values[k].IntValue()
+}
+
+// Map returns a map value at k
+func (t Section) Map(k string) map[string]string {
+ return t.values[k].MapValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) (float64, bool) {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
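
Reviewer note: the typed accessors convert lazily from the stored string, so a sketch like this (assuming in-module access) shows the two-value convention in use:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	src := "[default]\nmax_attempts = 3\nuse_fips = true\n"
	sections, err := ini.Parse(strings.NewReader(src), "in-memory")
	if err != nil {
		panic(err)
	}
	s, _ := sections.GetSection("default")
	if n, ok := s.Int("max_attempts"); ok {
		fmt.Println("attempts:", n) // 3 (strconv.ParseInt with base 0)
	}
	if b, ok := s.Bool("use_fips"); ok {
		fmt.Println("fips:", b) // true; only "true"/"false" (any case) parse
	}
}
```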
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
new file mode 100644
index 000000000..ed77d0835
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
@@ -0,0 +1,89 @@
+package ini
+
+import (
+ "strings"
+)
+
+func trimProfileComment(s string) string {
+ r, _, _ := strings.Cut(s, "#")
+ r, _, _ = strings.Cut(r, ";")
+ return r
+}
+
+func trimPropertyComment(s string) string {
+ r, _, _ := strings.Cut(s, " #")
+ r, _, _ = strings.Cut(r, " ;")
+ r, _, _ = strings.Cut(r, "\t#")
+ r, _, _ = strings.Cut(r, "\t;")
+ return r
+}
+
+// assumes no surrounding comment
+func splitProperty(s string) (string, string, bool) {
+ equalsi := strings.Index(s, "=")
+ coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment
+ sep := "="
+ if equalsi == -1 || coloni != -1 && coloni < equalsi {
+ sep = ":"
+ }
+
+ k, v, ok := strings.Cut(s, sep)
+ if !ok {
+ return "", "", false
+ }
+ return strings.TrimSpace(k), strings.TrimSpace(v), true
+}
+
+// assumes no surrounding comment, whitespace, or profile brackets
+func splitProfile(s string) (string, string) {
+ var first int
+ for i, r := range s {
+ if isLineSpace(r) {
+ if first == 0 {
+ first = i
+ }
+ } else {
+ if first != 0 {
+ return s[:first], s[i:]
+ }
+ }
+ }
+ if first == 0 {
+ return "", s // type component is effectively blank
+ }
+ return "", ""
+}
+
+func isLineSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+func unquote(s string) string {
+ if isSingleQuoted(s) || isDoubleQuoted(s) {
+ return s[1 : len(s)-1]
+ }
+ return s
+}
+
+// applies various legacy conversions to property values:
+// - remove wrapping single/double quotes
+func legacyStrconv(s string) string {
+ s = unquote(s)
+ return s
+}
+
+func isSingleQuoted(s string) bool {
+ return hasAffixes(s, "'", "'")
+}
+
+func isDoubleQuoted(s string) bool {
+ return hasAffixes(s, `"`, `"`)
+}
+
+func isBracketed(s string) bool {
+ return hasAffixes(s, "[", "]")
+}
+
+func hasAffixes(s, left, right string) bool {
+ return strings.HasPrefix(s, left) && strings.HasSuffix(s, right)
+}
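
Reviewer note: these helpers are unexported, so any check has to live in-package; a test sketch of the legacy separator and quote handling:

```go
package ini

import "testing"

// Sketch of an in-package test covering the legacy ":" separator and
// the single-layer quote stripping applied by legacyStrconv/unquote.
func TestLegacyPropertyRules(t *testing.T) {
	k, v, ok := splitProperty("region : us-east-1")
	if !ok || k != "region" || v != "us-east-1" {
		t.Fatalf("got k=%q v=%q ok=%v", k, v, ok)
	}
	if got := unquote(`"hello"`); got != "hello" {
		t.Fatalf("got %q", got)
	}
}
```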
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
new file mode 100644
index 000000000..6e9a03744
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
@@ -0,0 +1,32 @@
+package ini
+
+type lineToken interface {
+ isLineToken()
+}
+
+type lineTokenProfile struct {
+ Type string
+ Name string
+}
+
+func (*lineTokenProfile) isLineToken() {}
+
+type lineTokenProperty struct {
+ Key string
+ Value string
+}
+
+func (*lineTokenProperty) isLineToken() {}
+
+type lineTokenContinuation struct {
+ Value string
+}
+
+func (*lineTokenContinuation) isLineToken() {}
+
+type lineTokenSubProperty struct {
+ Key string
+ Value string
+}
+
+func (*lineTokenSubProperty) isLineToken() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go
new file mode 100644
index 000000000..89a773684
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go
@@ -0,0 +1,92 @@
+package ini
+
+import (
+ "strings"
+)
+
+func tokenize(lines []string) ([]lineToken, error) {
+ tokens := make([]lineToken, 0, len(lines))
+ for _, line := range lines {
+ if len(strings.TrimSpace(line)) == 0 || isLineComment(line) {
+ continue
+ }
+
+ if tok := asProfile(line); tok != nil {
+ tokens = append(tokens, tok)
+ } else if tok := asProperty(line); tok != nil {
+ tokens = append(tokens, tok)
+ } else if tok := asSubProperty(line); tok != nil {
+ tokens = append(tokens, tok)
+ } else if tok := asContinuation(line); tok != nil {
+ tokens = append(tokens, tok)
+ } // unrecognized tokens are effectively ignored
+ }
+ return tokens, nil
+}
+
+func isLineComment(line string) bool {
+ trimmed := strings.TrimLeft(line, " \t")
+ return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";")
+}
+
+func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment"
+ trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]"
+ if !isBracketed(trimmed) {
+ return nil
+ }
+ trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ")
+ trimmed = strings.TrimSpace(trimmed) // "type name" / "name"
+ typ, name := splitProfile(trimmed)
+ return &lineTokenProfile{
+ Type: typ,
+ Name: name,
+ }
+}
+
+func asProperty(line string) *lineTokenProperty {
+ if isLineSpace(rune(line[0])) {
+ return nil
+ }
+
+ trimmed := trimPropertyComment(line)
+ trimmed = strings.TrimRight(trimmed, " \t")
+ k, v, ok := splitProperty(trimmed)
+ if !ok {
+ return nil
+ }
+
+ return &lineTokenProperty{
+ Key: strings.ToLower(k), // LEGACY: normalize key case
+ Value: legacyStrconv(v), // LEGACY: see func docs
+ }
+}
+
+func asSubProperty(line string) *lineTokenSubProperty {
+ if !isLineSpace(rune(line[0])) {
+ return nil
+ }
+
+ // comments on sub-properties are included in the value
+ trimmed := strings.TrimLeft(line, " \t")
+ k, v, ok := splitProperty(trimmed)
+ if !ok {
+ return nil
+ }
+
+ return &lineTokenSubProperty{ // same LEGACY constraints as in normal property
+ Key: strings.ToLower(k),
+ Value: legacyStrconv(v),
+ }
+}
+
+func asContinuation(line string) *lineTokenContinuation {
+ if !isLineSpace(rune(line[0])) {
+ return nil
+ }
+
+ // includes comments like sub-properties
+ trimmed := strings.TrimLeft(line, " \t")
+ return &lineTokenContinuation{
+ Value: trimmed,
+ }
+}
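
Reviewer note: a small in-package test sketch of the classification order (comments and blanks dropped first, then profile/property/sub-property/continuation):

```go
package ini

import "testing"

func TestTokenizeSkipsCommentsAndBlanks(t *testing.T) {
	toks, err := tokenize([]string{"# comment", "", "[default]", "key = value"})
	if err != nil || len(toks) != 2 {
		t.Fatalf("expected 2 tokens, got %d (err %v)", len(toks), err)
	}
	if _, ok := toks[0].(*lineTokenProfile); !ok {
		t.Fatal("first token should be a profile header")
	}
}
```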
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
new file mode 100644
index 000000000..e3706b3c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
@@ -0,0 +1,93 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case StringType:
+ return "STRING"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ StringType
+ QuotedStringType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+
+ str string
+ mp map[string]string
+}
+
+// NewStringValue returns a Value type generated using a string input.
+func NewStringValue(str string) (Value, error) {
+ return Value{str: str}, nil
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.str))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.str))
+ default:
+ return "union not set"
+ }
+}
+
+// MapValue returns a map value for sub properties
+func (v Value) MapValue() map[string]string {
+ return v.mp
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() (int64, bool) {
+ i, err := strconv.ParseInt(string(v.str), 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return i, true
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() (float64, bool) {
+ f, err := strconv.ParseFloat(string(v.str), 64)
+ if err != nil {
+ return 0, false
+ }
+ return f, true
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() (bool, bool) {
+ // we don't use ParseBool as it recognizes more than what we've
+ // historically supported
+ if strings.EqualFold(v.str, "true") {
+ return true, true
+ } else if strings.EqualFold(v.str, "false") {
+ return false, true
+ }
+ return false, false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ return v.str
+}
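
Reviewer note: the conversions all parse the stored string on demand; a short sketch via the exported constructor (assuming in-module access):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	v, _ := ini.NewStringValue("42") // the error is currently always nil
	if n, ok := v.IntValue(); ok {
		fmt.Println("int:", n) // 42 (base 0, so "0x2a" would parse too)
	}
	if f, ok := v.FloatValue(); ok {
		fmt.Println("float:", f) // 42
	}
	fmt.Println(v.StringValue()) // 42
}
```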
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go
new file mode 100644
index 000000000..8e24a3f0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go
@@ -0,0 +1,42 @@
+package middleware
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// AddTimeOffsetMiddleware sets a value representing clock skew on the request context.
+// This can be read by other operations (such as signing) to correct the date value they send
+// on the request.
+type AddTimeOffsetMiddleware struct {
+ Offset *atomic.Int64
+}
+
+// ID the identifier for AddTimeOffsetMiddleware
+func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" }
+
+// HandleBuild sets a value for attemptSkew on the request context if one is set on the client.
+func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ if m.Offset != nil {
+ offset := time.Duration(m.Offset.Load())
+ ctx = internalcontext.SetAttemptSkewContext(ctx, offset)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer
+// held by AddTimeOffsetMiddleware
+func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 {
+ m.Offset.Store(v.Nanoseconds())
+ }
+ return next.HandleDeserialize(ctx, in)
+}
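
Reviewer note: the same value must be registered on both the build and deserialize steps so the measured skew flows back through the shared atomic; a wiring sketch against the smithy-go stack (hypothetical helper, assuming in-module access):

```go
package wiring

import (
	"sync/atomic"

	internalmw "github.com/aws/aws-sdk-go-v2/internal/middleware"
	"github.com/aws/smithy-go/middleware"
)

// addSkewTracking attaches one AddTimeOffsetMiddleware instance to both
// steps: HandleBuild stamps the current offset onto the request context,
// and HandleDeserialize stores any newly measured skew back into offset.
func addSkewTracking(stack *middleware.Stack, offset *atomic.Int64) error {
	m := &internalmw.AddTimeOffsetMiddleware{Offset: offset}
	if err := stack.Build.Add(m, middleware.After); err != nil {
		return err
	}
	return stack.Deserialize.Add(m, middleware.After)
}
```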
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
new file mode 100644
index 000000000..c8484dcd7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
@@ -0,0 +1,33 @@
+package rand
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+func init() {
+ Reader = rand.Reader
+}
+
+// Reader provides a random reader that can be reset during testing.
+var Reader io.Reader
+
+var floatMaxBigInt = big.NewInt(1 << 53)
+
+// Float64 returns a float64 read from an io.Reader source. The returned float will be in the range [0.0, 1.0).
+func Float64(reader io.Reader) (float64, error) {
+ bi, err := rand.Int(reader, floatMaxBigInt)
+ if err != nil {
+ return 0, fmt.Errorf("failed to read random value, %v", err)
+ }
+
+ return float64(bi.Int64()) / (1 << 53), nil
+}
+
+// CryptoRandFloat64 returns a random float64 obtained from the crypto rand
+// source.
+func CryptoRandFloat64() (float64, error) {
+ return Float64(Reader)
+}
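
Reviewer note: one intended consumer is retry jitter; a tiny sketch (assuming in-module access):

```go
package main

import (
	"fmt"
	"time"

	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
)

func main() {
	f, err := sdkrand.CryptoRandFloat64() // uniform in [0.0, 1.0)
	if err != nil {
		panic(err)
	}
	// Full-jitter style backoff: scale the random fraction by a cap.
	delay := time.Duration(f * float64(20*time.Second))
	fmt.Println("sleeping for", delay)
}
```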
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
new file mode 100644
index 000000000..2b42cbe64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
@@ -0,0 +1,9 @@
+package sdk
+
+// Invalidator provides access to a type's invalidate method to make it
+// invalidate its cache.
+//
+// e.g. aws.SafeCredentialsProvider's Invalidate method.
+type Invalidator interface {
+ Invalidate()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
new file mode 100644
index 000000000..8e8dabad5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
@@ -0,0 +1,74 @@
+package sdk
+
+import (
+ "context"
+ "time"
+)
+
+func init() {
+ NowTime = time.Now
+ Sleep = time.Sleep
+ SleepWithContext = sleepWithContext
+}
+
+// NowTime is a value for getting the current time. This value can be overridden
+// for testing, mocking out the current time.
+var NowTime func() time.Time
+
+// Sleep is a value for sleeping for a duration. This value can be overridden
+// for testing and mocking out sleep duration.
+var Sleep func(time.Duration)
+
+// SleepWithContext will wait for the timer duration to expire or the context
+// to be canceled, whichever happens first. If the context is canceled, the
+// context's error will be returned.
+//
+// This value can be overridden for testing and mocking out sleep duration.
+var SleepWithContext func(context.Context, time.Duration) error
+
+// sleepWithContext will wait for the timer duration to expire or the context
+// to be canceled, whichever happens first. If the context is canceled, the
+// context's error will be returned.
+func sleepWithContext(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
+
+// noOpSleepWithContext does nothing, returns immediately.
+func noOpSleepWithContext(context.Context, time.Duration) error {
+ return nil
+}
+
+func noOpSleep(time.Duration) {}
+
+// TestingUseNopSleep is a utility for disabling sleep across the SDK for
+// testing.
+func TestingUseNopSleep() func() {
+ SleepWithContext = noOpSleepWithContext
+ Sleep = noOpSleep
+
+ return func() {
+ SleepWithContext = sleepWithContext
+ Sleep = time.Sleep
+ }
+}
+
+// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time
+// for testing purposes.
+func TestingUseReferenceTime(referenceTime time.Time) func() {
+ NowTime = func() time.Time {
+ return referenceTime
+ }
+ return func() {
+ NowTime = time.Now
+ }
+}
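
Reviewer note: a sketch of the test hooks in use (assuming in-module access): NowTime is pinned and restored, and SleepWithContext returns the context error when the deadline fires first:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/internal/sdk"
)

func main() {
	restore := sdk.TestingUseReferenceTime(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC))
	defer restore()
	fmt.Println(sdk.NowTime()) // 2024-01-01 00:00:00 +0000 UTC

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// The 10ms deadline fires before the 1s timer, so ctx.Err() is returned.
	fmt.Println(sdk.SleepWithContext(ctx, time.Second)) // context deadline exceeded
}
```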
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
new file mode 100644
index 000000000..6c443988b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+ // Byte is 8 bits
+ Byte int64 = 1
+ // KibiByte (KiB) is 1024 Bytes
+ KibiByte = Byte * 1024
+ // MebiByte (MiB) is 1024 KiB
+ MebiByte = KibiByte * 1024
+ // GibiByte (GiB) is 1024 MiB
+ GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go
new file mode 100644
index 000000000..c96b717e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go
@@ -0,0 +1,47 @@
+package shareddefaults
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ // Ignore errors since we only care about Windows and *nix.
+ home, _ := os.UserHomeDir()
+
+ if len(home) > 0 {
+ return home
+ }
+
+ currUser, _ := user.Current()
+ if currUser != nil {
+ home = currUser.HomeDir
+ }
+
+ return home
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
new file mode 100644
index 000000000..d008ae27c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
@@ -0,0 +1,11 @@
+package strings
+
+import (
+ "strings"
+)
+
+// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
+// under Unicode case-folding.
+func HasPrefixFold(s, prefix string) bool {
+ return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
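
Reviewer note: a one-liner demonstration of the allocation-free, case-folded prefix check (assuming in-module access; the import is aliased to avoid shadowing the stdlib):

```go
package main

import (
	"fmt"

	istrings "github.com/aws/aws-sdk-go-v2/internal/strings"
)

func main() {
	fmt.Println(istrings.HasPrefixFold("X-Amz-Date", "x-amz-"))   // true
	fmt.Println(istrings.HasPrefixFold("Content-Type", "x-amz-")) // false
}
```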
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
new file mode 100644
index 000000000..fe6a62006
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
new file mode 100644
index 000000000..cb70616e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
@@ -0,0 +1,7 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked because it is a part of the unstable
+// and unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+package singleflight
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
new file mode 100644
index 000000000..e8a1b17d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+ value interface{}
+ stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+ return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+ stack := debug.Stack()
+
+ // The first line of the stack trace is of the form "goroutine N [status]:"
+ // but by the time the panic reaches Do the goroutine may no longer exist
+ // and its status will have changed. Trim out the misleading line.
+ if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+ stack = stack[line+1:]
+ }
+ return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // forgotten indicates whether Forget was called with this call's key
+ // while the call was still in flight.
+ forgotten bool
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+
+ if e, ok := c.err.(*panicError); ok {
+ panic(e)
+ } else if c.err == errGoexit {
+ runtime.Goexit()
+ }
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+ normalReturn := false
+ recovered := false
+
+ // use double-defer to distinguish panic from runtime.Goexit,
+ // more details see https://golang.org/cl/134395
+ defer func() {
+ // the given function invoked runtime.Goexit
+ if !normalReturn && !recovered {
+ c.err = errGoexit
+ }
+
+ c.wg.Done()
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ if !c.forgotten {
+ delete(g.m, key)
+ }
+
+ if e, ok := c.err.(*panicError); ok {
+			// In order to prevent the waiting channels from being blocked forever,
+			// we need to ensure that this panic cannot be recovered.
+ if len(c.chans) > 0 {
+ go panic(e)
+ select {} // Keep this goroutine around so that it will appear in the crash dump.
+ } else {
+ panic(e)
+ }
+ } else if c.err == errGoexit {
+ // Already in the process of goexit, no need to call again
+ } else {
+ // Normal return
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ }
+ }()
+
+ func() {
+ defer func() {
+ if !normalReturn {
+ // Ideally, we would wait to take a stack trace until we've determined
+ // whether this is a panic or a runtime.Goexit.
+ //
+ // Unfortunately, the only way we can distinguish the two is to see
+ // whether the recover stopped the goroutine from terminating, and by
+ // the time we know that, the part of the stack trace relevant to the
+ // panic has been discarded.
+ if r := recover(); r != nil {
+ c.err = newPanicError(r)
+ }
+ }
+ }()
+
+ c.val, c.err = fn()
+ normalReturn = true
+ }()
+
+ if !normalReturn {
+ recovered = true
+ }
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+ g.mu.Lock()
+ if c, ok := g.m[key]; ok {
+ c.forgotten = true
+ }
+ delete(g.m, key)
+ g.mu.Unlock()
+}
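
Reviewer note: the canonical use is collapsing concurrent lookups for the same key; a sketch with five goroutines (assuming in-module access):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Callers that arrive while fn is in flight for "creds"
			// share its result instead of running fn again.
			v, _, shared := g.Do("creds", func() (interface{}, error) {
				return "expensive-token", nil
			})
			fmt.Println(v, "shared:", shared)
		}()
	}
	wg.Wait()
}
```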
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
new file mode 100644
index 000000000..5d69db5f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
@@ -0,0 +1,13 @@
+package timeconv
+
+import "time"
+
+// FloatSecondsDur converts a fractional seconds to duration.
+func FloatSecondsDur(v float64) time.Duration {
+ return time.Duration(v * float64(time.Second))
+}
+
+// DurSecondsFloat converts a duration into fractional seconds.
+func DurSecondsFloat(d time.Duration) float64 {
+ return float64(d) / float64(time.Second)
+}
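
Reviewer note: trivial but easy to get backwards; a round-trip sketch (assuming in-module access):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/timeconv"
)

func main() {
	d := timeconv.FloatSecondsDur(1.5)       // 1.5 -> time.Duration
	fmt.Println(d)                           // 1.5s
	fmt.Println(timeconv.DurSecondsFloat(d)) // 1.5
}
```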
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
new file mode 100644
index 000000000..c574867d0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
@@ -0,0 +1,338 @@
+# v1.3.34 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.30 (2025-01-30)
+
+* **Bug Fix**: Do not sign Transfer-Encoding header in Sigv4[a]. Fixes a signer mismatch issue with S3 Accelerate.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.3.28 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.27 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.26 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.25 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.24 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.23 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.22 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.21 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.6 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.5 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.4 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.28 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.27 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.26 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.25 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.24 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.23 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.22 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.21 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.20 (2023-02-14)
+
+* No change notes available for this release.
+
+# v1.0.19 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.18 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.17 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.16 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.15 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.14 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.13 (2022-09-14)
+
+* **Bug Fix**: Fixes an issue where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.12 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.11 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.10 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.9 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.8 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.7 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.6 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.5 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.4 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.3 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.2 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.1 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2022-04-07)
+
+* **Release**: New internal v4a signing module location.
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go
new file mode 100644
index 000000000..3ae3a019e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/credentials.go
@@ -0,0 +1,141 @@
+package v4a
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// Credentials holds the Context, ECDSA private key, and optional session
+// token that can be used to sign requests using SigV4a
+type Credentials struct {
+ Context string
+ PrivateKey *ecdsa.PrivateKey
+ SessionToken string
+
+ // Time the credentials will expire.
+ CanExpire bool
+ Expires time.Time
+}
+
+// Expired returns if the credentials have expired.
+func (v Credentials) Expired() bool {
+ if v.CanExpire {
+ return !v.Expires.After(sdk.NowTime())
+ }
+
+ return false
+}
+
+// HasKeys returns if the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+ return len(v.Context) > 0 && v.PrivateKey != nil
+}
+
+// SymmetricCredentialAdaptor wraps a SigV4 AccessKey/SecretKey provider and adapts the credentials
+// to an ECDSA PrivateKey for signing with SigV4a
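+//
+// A minimal usage sketch (cfg is an aws.Config assumed to be in scope):
+//
+//	adaptor := &SymmetricCredentialAdaptor{SymmetricProvider: cfg.Credentials}
+//	creds, err := adaptor.RetrievePrivateKey(ctx)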
+type SymmetricCredentialAdaptor struct {
+ SymmetricProvider aws.CredentialsProvider
+
+ asymmetric atomic.Value
+ m sync.Mutex
+}
+
+// Retrieve retrieves symmetric credentials from the underlying provider.
+func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ symCreds, err := s.retrieveFromSymmetricProvider(ctx)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ if asymCreds := s.getCreds(); asymCreds == nil {
+ return symCreds, nil
+ }
+
+ s.m.Lock()
+ defer s.m.Unlock()
+
+ asymCreds := s.getCreds()
+ if asymCreds == nil {
+ return symCreds, nil
+ }
+
+ // if the context does not match the access key ID, clear the cached asymmetric credentials
+ if asymCreds.Context != symCreds.AccessKeyID {
+ s.asymmetric.Store((*Credentials)(nil))
+ }
+
+ return symCreds, nil
+}
+
+// RetrievePrivateKey returns credentials suitable for SigV4a signing
+func (s *SymmetricCredentialAdaptor) RetrievePrivateKey(ctx context.Context) (Credentials, error) {
+ if asymCreds := s.getCreds(); asymCreds != nil {
+ return *asymCreds, nil
+ }
+
+ s.m.Lock()
+ defer s.m.Unlock()
+
+ if asymCreds := s.getCreds(); asymCreds != nil {
+ return *asymCreds, nil
+ }
+
+ symmetricCreds, err := s.retrieveFromSymmetricProvider(ctx)
+ if err != nil {
+ return Credentials{}, fmt.Errorf("failed to retrieve symmetric credentials: %v", err)
+ }
+
+ privateKey, err := deriveKeyFromAccessKeyPair(symmetricCreds.AccessKeyID, symmetricCreds.SecretAccessKey)
+ if err != nil {
+ return Credentials{}, fmt.Errorf("failed to derive assymetric key from credentials")
+ }
+
+ creds := Credentials{
+ Context: symmetricCreds.AccessKeyID,
+ PrivateKey: privateKey,
+ SessionToken: symmetricCreds.SessionToken,
+ CanExpire: symmetricCreds.CanExpire,
+ Expires: symmetricCreds.Expires,
+ }
+
+ s.asymmetric.Store(&creds)
+
+ return creds, nil
+}
+
+func (s *SymmetricCredentialAdaptor) getCreds() *Credentials {
+ v := s.asymmetric.Load()
+
+ if v == nil {
+ return nil
+ }
+
+ c := v.(*Credentials)
+ if c != nil && c.HasKeys() && !c.Expired() {
+ return c
+ }
+
+ return nil
+}
+
+func (s *SymmetricCredentialAdaptor) retrieveFromSymmetricProvider(ctx context.Context) (aws.Credentials, error) {
+ credentials, err := s.SymmetricProvider.Retrieve(ctx)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ return credentials, nil
+}
+
+// CredentialsProvider is the interface for a provider to retrieve credentials
+// to sign requests with.
+type CredentialsProvider interface {
+ RetrievePrivateKey(context.Context) (Credentials, error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
new file mode 100644
index 000000000..380d17427
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/error.go
@@ -0,0 +1,17 @@
+package v4a
+
+import "fmt"
+
+// SigningError indicates an error condition occurred while performing SigV4a signing
+type SigningError struct {
+ Err error
+}
+
+func (e *SigningError) Error() string {
+ return fmt.Sprintf("failed to sign request: %v", e.Err)
+}
+
+// Unwrap returns the underlying error cause
+func (e *SigningError) Unwrap() error {
+ return e.Err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
new file mode 100644
index 000000000..b6c06c704
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package v4a
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.3.34"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
new file mode 100644
index 000000000..1d0f25f8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/compare.go
@@ -0,0 +1,30 @@
+package crypto
+
+import "fmt"
+
+// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
+// of the two byte slices, treating them as big-endian numbers.
+//
+// error if len(x) != len(y)
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+func ConstantTimeByteCompare(x, y []byte) (int, error) {
+ if len(x) != len(y) {
+ return 0, fmt.Errorf("slice lengths do not match")
+ }
+
+ xLarger, yLarger := 0, 0
+
+ for i := 0; i < len(x); i++ {
+ xByte, yByte := int(x[i]), int(y[i])
+
+ x := ((yByte - xByte) >> 8) & 1
+ y := ((xByte - yByte) >> 8) & 1
+
+ xLarger |= x &^ yLarger
+ yLarger |= y &^ xLarger
+ }
+
+ return xLarger - yLarger, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
new file mode 100644
index 000000000..758c73fcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto/ecc.go
@@ -0,0 +1,113 @@
+package crypto
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/hmac"
+ "encoding/asn1"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "math"
+ "math/big"
+)
+
+type ecdsaSignature struct {
+ R, S *big.Int
+}
+
+// ECDSAKey takes the given elliptic curve, and private key (d) byte slice
+// and returns the private ECDSA key.
+func ECDSAKey(curve elliptic.Curve, d []byte) *ecdsa.PrivateKey {
+ return ECDSAKeyFromPoint(curve, (&big.Int{}).SetBytes(d))
+}
+
+// ECDSAKeyFromPoint takes the given elliptic curve and point and returns the
+// private and public keypair
+func ECDSAKeyFromPoint(curve elliptic.Curve, d *big.Int) *ecdsa.PrivateKey {
+ pX, pY := curve.ScalarBaseMult(d.Bytes())
+
+ privKey := &ecdsa.PrivateKey{
+ PublicKey: ecdsa.PublicKey{
+ Curve: curve,
+ X: pX,
+ Y: pY,
+ },
+ D: d,
+ }
+
+ return privKey
+}
+
+// ECDSAPublicKey takes the provided curve and (x, y) coordinates and returns
+// *ecdsa.PublicKey. Returns an error if the given points are not on the curve.
+func ECDSAPublicKey(curve elliptic.Curve, x, y []byte) (*ecdsa.PublicKey, error) {
+ xPoint := (&big.Int{}).SetBytes(x)
+ yPoint := (&big.Int{}).SetBytes(y)
+
+ if !curve.IsOnCurve(xPoint, yPoint) {
+ return nil, fmt.Errorf("point(%v, %v) is not on the given curve", xPoint.String(), yPoint.String())
+ }
+
+ return &ecdsa.PublicKey{
+ Curve: curve,
+ X: xPoint,
+ Y: yPoint,
+ }, nil
+}
+
+// VerifySignature takes the provided public key, hash, and asn1 encoded signature and returns
+// whether the given signature is valid.
+func VerifySignature(key *ecdsa.PublicKey, hash []byte, signature []byte) (bool, error) {
+ var ecdsaSignature ecdsaSignature
+
+ _, err := asn1.Unmarshal(signature, &ecdsaSignature)
+ if err != nil {
+ return false, err
+ }
+
+ return ecdsa.Verify(key, hash, ecdsaSignature.R, ecdsaSignature.S), nil
+}
+
+// HMACKeyDerivation provides an implementation of a NIST SP 800-108 KDF (Key Derivation Function) in counter mode.
+// For the purposes of this implementation, HMAC is used as the PRF (pseudorandom function), where the value of
+// `r` is defined as a 4 byte counter.
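+//
+// For example, deriving bitLen=256 using SHA-256 performs a single counter
+// iteration and returns the first 32 bytes of the HMAC output.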
+func HMACKeyDerivation(hash func() hash.Hash, bitLen int, key []byte, label, context []byte) ([]byte, error) {
+ // verify that we won't overflow the counter
+ n := int64(math.Ceil((float64(bitLen) / 8) / float64(hash().Size())))
+ if n > 0x7FFFFFFF {
+ return nil, fmt.Errorf("unable to derive key of size %d using 32-bit counter", bitLen)
+ }
+
+ // verify the requested bit length is not larger than the length encoding size
+ if int64(bitLen) > 0x7FFFFFFF {
+ return nil, fmt.Errorf("bitLen is greater than 32-bits")
+ }
+
+ fixedInput := bytes.NewBuffer(nil)
+ fixedInput.Write(label)
+ fixedInput.WriteByte(0x00)
+ fixedInput.Write(context)
+ if err := binary.Write(fixedInput, binary.BigEndian, int32(bitLen)); err != nil {
+ return nil, fmt.Errorf("failed to write bit length to fixed input string: %v", err)
+ }
+
+ var output []byte
+
+ h := hmac.New(hash, key)
+
+ for i := int64(1); i <= n; i++ {
+ h.Reset()
+ if err := binary.Write(h, binary.BigEndian, int32(i)); err != nil {
+ return nil, err
+ }
+ _, err := h.Write(fixedInput.Bytes())
+ if err != nil {
+ return nil, err
+ }
+ output = append(output, h.Sum(nil)...)
+ }
+
+ return output[:bitLen/8], nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
new file mode 100644
index 000000000..89a76e2ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/const.go
@@ -0,0 +1,36 @@
+package v4
+
+const (
+ // EmptyStringSHA256 is the hex encoded sha256 value of an empty string
+ EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+
+ // UnsignedPayload indicates that the request payload body is unsigned
+ UnsignedPayload = "UNSIGNED-PAYLOAD"
+
+ // AmzAlgorithmKey indicates the signing algorithm
+ AmzAlgorithmKey = "X-Amz-Algorithm"
+
+ // AmzSecurityTokenKey indicates the security token to be used with temporary credentials
+ AmzSecurityTokenKey = "X-Amz-Security-Token"
+
+ // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z'
+ AmzDateKey = "X-Amz-Date"
+
+ // AmzCredentialKey is the access key ID and credential scope
+ AmzCredentialKey = "X-Amz-Credential"
+
+ // AmzSignedHeadersKey is the set of headers signed for the request
+ AmzSignedHeadersKey = "X-Amz-SignedHeaders"
+
+ // AmzSignatureKey is the query parameter to store the SigV4 signature
+ AmzSignatureKey = "X-Amz-Signature"
+
+ // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter
+ TimeFormat = "20060102T150405Z"
+
+ // ShortTimeFormat is the shortened time format used in the credential scope
+ ShortTimeFormat = "20060102"
+
+ // ContentSHAKey is the header key for the SHA-256 hash of the request body
+ ContentSHAKey = "X-Amz-Content-Sha256"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
new file mode 100644
index 000000000..a15177e8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule values needed for validating a
+// string value
+type Rules []Rule
+
+// Rule is an interface that allows for flexible rules and simply
+// checks whether or not a value adheres to that Rule
+type Rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid iterates through all rules, including nested rules, and reports
+// whether any rule applies to the value
+func (r Rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// MapRule generic Rule for maps
+type MapRule map[string]struct{}
+
+// IsValid for MapRule reports whether the value exists in the map
+func (m MapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// AllowList is a generic Rule for whitelisting
+type AllowList struct {
+ Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList
+func (w AllowList) IsValid(value string) bool {
+ return w.Rule.IsValid(value)
+}
+
+// DenyList is a generic Rule for blacklisting
+type DenyList struct {
+ Rule
+}
+
+// IsValid for DenyList checks that the value is not within the DenyList
+func (b DenyList) IsValid(value string) bool {
+ return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against
+type Patterns []string
+
+// IsValid for Patterns reports whether any pattern is a
+// case-insensitive prefix of the value
+func (p Patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if sdkstrings.HasPrefixFold(value, pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// InclusiveRules allows rules to depend on one another
+type InclusiveRules []Rule
+
+// IsValid will return true if all rules are true
+func (r InclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
new file mode 100644
index 000000000..688f83474
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
@@ -0,0 +1,68 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing
+var IgnoredHeaders = Rules{
+ DenyList{
+ MapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ "Transfer-Encoding": struct{}{},
+ },
+ },
+}
+
+// RequiredSignedHeaders is a whitelist of headers that must be signed when building the canonical headers.
+var RequiredSignedHeaders = Rules{
+ AllowList{
+ MapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ },
+ },
+ Patterns{"X-Amz-Meta-"},
+}
+
+// AllowedQueryHoisting is a whitelist of headers that may be hoisted into the
+// query string during presigning: headers that are not required signed
+// headers and that match the X-Amz- prefix.
+var AllowedQueryHoisting = InclusiveRules{
+ DenyList{RequiredSignedHeaders},
+ Patterns{"X-Amz-"},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go
new file mode 100644
index 000000000..e7fa7a1b1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/hmac.go
@@ -0,0 +1,13 @@
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+)
+
+// HMACSHA256 computes an HMAC-SHA256 of data given the provided key.
+func HMACSHA256(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go
new file mode 100644
index 000000000..bf93659a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/host.go
@@ -0,0 +1,75 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
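+//
+// For example, a request to https://example.com:443/ has its Host rewritten
+// to "example.com", since 443 is the default port for HTTPS.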
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go
new file mode 100644
index 000000000..1de06a765
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/time.go
@@ -0,0 +1,36 @@
+package v4
+
+import "time"
+
+// SigningTime wraps a time.Time and caches the formatted values needed for SigV4 signing.
+type SigningTime struct {
+ time.Time
+ timeFormat string
+ shortTimeFormat string
+}
+
+// NewSigningTime creates a new SigningTime given a time.Time
+func NewSigningTime(t time.Time) SigningTime {
+ return SigningTime{
+ Time: t,
+ }
+}
+
+// TimeFormat provides a time formatted in the X-Amz-Date format.
+func (m *SigningTime) TimeFormat() string {
+ return m.format(&m.timeFormat, TimeFormat)
+}
+
+// ShortTimeFormat provides the time formatted as 20060102.
+func (m *SigningTime) ShortTimeFormat() string {
+ return m.format(&m.shortTimeFormat, ShortTimeFormat)
+}
+
+func (m *SigningTime) format(target *string, format string) string {
+ if len(*target) > 0 {
+ return *target
+ }
+ v := m.Time.Format(format)
+ *target = v
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
new file mode 100644
index 000000000..741019b5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/util.go
@@ -0,0 +1,64 @@
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+const doubleSpace = " "
+
+// StripExcessSpaces returns the passed-in string with leading and trailing
+// spaces trimmed, and runs of multiple side-by-side spaces collapsed to one.
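+//
+// For example, "  a  b   c " becomes "a b c".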
+func StripExcessSpaces(str string) string {
+ var j, k, l, m, spaces int
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ return str
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ return string(buf[:m])
+}
+
+// GetURIPath returns the escaped URI component from the provided URL
+func GetURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
new file mode 100644
index 000000000..64b8b4e33
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go
@@ -0,0 +1,118 @@
+package v4a
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPSigner is the interface implemented by a SigV4a HTTP signer.
+type HTTPSigner interface {
+ SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optfns ...func(*SignerOptions)) error
+}
+
+// SignHTTPRequestMiddlewareOptions is the middleware options for constructing a SignHTTPRequestMiddleware.
+type SignHTTPRequestMiddlewareOptions struct {
+ Credentials CredentialsProvider
+ Signer HTTPSigner
+ LogSigning bool
+}
+
+// SignHTTPRequestMiddleware is a middleware for signing an HTTP request using SigV4a.
+type SignHTTPRequestMiddleware struct {
+ credentials CredentialsProvider
+ signer HTTPSigner
+ logSigning bool
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given SignHTTPRequestMiddlewareOptions.
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+ return &SignHTTPRequestMiddleware{
+ credentials: options.Credentials,
+ signer: options.Signer,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID returns the middleware identifier.
+func (s *SignHTTPRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+// HandleFinalize signs an HTTP request using SigV4a.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if !hasCredentialProvider(s.credentials) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected request middleware type %T", in.Request)
+ }
+
+ signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx)
+ payloadHash := v4.GetPayloadHash(ctx)
+ if len(payloadHash) == 0 {
+ return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
+ }
+
+ credentials, err := s.credentials.RetrievePrivateKey(ctx)
+ if err != nil {
+ return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
+ }
+
+ signerOptions := []func(o *SignerOptions){
+ func(o *SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ },
+ }
+
+ // existing DisableURIPathEscaping is equivalent in purpose
+ // to authentication scheme property DisableDoubleEncoding
+ disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx)
+ if overridden {
+ signerOptions = append(signerOptions, func(o *SignerOptions) {
+ o.DisableURIPathEscaping = disableDoubleEncoding
+ })
+ }
+
+ err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), signerOptions...)
+ if err != nil {
+ return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func hasCredentialProvider(p CredentialsProvider) bool {
+ return p != nil
+}
+
+// RegisterSigningMiddleware registers the SigV4a signing middleware to the stack. If a signing middleware is already
+// present, the provided middleware will be swapped in. Otherwise, the middleware will be added at the tail of the
+// finalize step.
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+ const signedID = "Signing"
+ _, present := stack.Finalize.Get(signedID)
+ if present {
+ _, err = stack.Finalize.Swap(signedID, signingMiddleware)
+ } else {
+ err = stack.Finalize.Add(signingMiddleware, middleware.After)
+ }
+ return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
new file mode 100644
index 000000000..951fc415d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/presign_middleware.go
@@ -0,0 +1,117 @@
+package v4a
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "time"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go/middleware"
+ smithyHTTP "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPPresigner is an interface to a SigV4a signer that can create a
+// presigned URL for an HTTP request.
+type HTTPPresigner interface {
+ PresignHTTP(
+ ctx context.Context, credentials Credentials, r *http.Request,
+ payloadHash string, service string, regionSet []string, signingTime time.Time,
+ optFns ...func(*SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+ CredentialsProvider CredentialsProvider
+ Presigner HTTPPresigner
+ LogSigning bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// It short-circuits the middleware stack and does not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+ credentialsProvider CredentialsProvider
+ presigner HTTPPresigner
+ logSigning bool
+}
+
+// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware
+// initialized with the presigner.
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+ return &PresignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ presigner: options.Presigner,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned url for
+// the http request using the SigV4a presign authentication scheme.
+func (s *PresignHTTPRequestMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyHTTP.Request)
+ if !ok {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("unexpected request middleware type %T", in.Request),
+ }
+ }
+
+ httpReq := req.Build(ctx)
+ if !hasCredentialProvider(s.credentialsProvider) {
+ out.Result = &v4.PresignedHTTPRequest{
+ URL: httpReq.URL.String(),
+ Method: httpReq.Method,
+ SignedHeader: http.Header{},
+ }
+
+ return out, metadata, nil
+ }
+
+ signingName := awsmiddleware.GetSigningName(ctx)
+ signingRegion := awsmiddleware.GetSigningRegion(ctx)
+ payloadHash := v4.GetPayloadHash(ctx)
+ if len(payloadHash) == 0 {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("computed payload hash missing from context"),
+ }
+ }
+
+ credentials, err := s.credentialsProvider.RetrievePrivateKey(ctx)
+ if err != nil {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+ }
+ }
+
+ u, h, err := s.presigner.PresignHTTP(ctx, credentials,
+ httpReq, payloadHash, signingName, []string{signingRegion}, sdk.NowTime(),
+ func(o *SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ })
+ if err != nil {
+ return out, metadata, &SigningError{
+ Err: fmt.Errorf("failed to sign http request, %w", err),
+ }
+ }
+
+ out.Result = &v4.PresignedHTTPRequest{
+ URL: u,
+ Method: httpReq.Method,
+ SignedHeader: h,
+ }
+
+ return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
new file mode 100644
index 000000000..af4f6abcf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
@@ -0,0 +1,92 @@
+package v4a
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// CredentialsAdapter adapts v4a.Credentials to smithy auth.Identity.
+type CredentialsAdapter struct {
+ Credentials Credentials
+}
+
+var _ auth.Identity = (*CredentialsAdapter)(nil)
+
+// Expiration returns the time of expiration for the credentials.
+func (v *CredentialsAdapter) Expiration() time.Time {
+ return v.Credentials.Expires
+}
+
+// CredentialsProviderAdapter adapts v4a.CredentialsProvider to
+// auth.IdentityResolver.
+type CredentialsProviderAdapter struct {
+ Provider CredentialsProvider
+}
+
+var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil)
+
+// GetIdentity retrieves v4a credentials using the underlying provider.
+func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+ auth.Identity, error,
+) {
+ creds, err := v.Provider.RetrievePrivateKey(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get credentials: %w", err)
+ }
+
+ return &CredentialsAdapter{Credentials: creds}, nil
+}
+
+// SignerAdapter adapts v4a.HTTPSigner to smithy http.Signer.
+type SignerAdapter struct {
+ Signer HTTPSigner
+ Logger logging.Logger
+ LogSigning bool
+}
+
+var _ (smithyhttp.Signer) = (*SignerAdapter)(nil)
+
+// SignRequest signs the request with the provided identity.
+func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error {
+ ca, ok := identity.(*CredentialsAdapter)
+ if !ok {
+ return fmt.Errorf("unexpected identity type: %T", identity)
+ }
+
+ name, ok := smithyhttp.GetSigV4SigningName(&props)
+ if !ok {
+ return fmt.Errorf("sigv4a signing name is required")
+ }
+
+ regions, ok := smithyhttp.GetSigV4ASigningRegions(&props)
+ if !ok {
+ return fmt.Errorf("sigv4a signing region is required")
+ }
+
+ hash := v4.GetPayloadHash(ctx)
+ signingTime := sdk.NowTime()
+ if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 {
+ signingTime = signingTime.Add(skew)
+ }
+ err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) {
+ o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
+
+ o.Logger = v.Logger
+ o.LogSigning = v.LogSigning
+ })
+ if err != nil {
+ return fmt.Errorf("sign http: %w", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go
new file mode 100644
index 000000000..f1f6ecc37
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/v4a.go
@@ -0,0 +1,520 @@
+package v4a
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "math/big"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto"
+ v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/logging"
+)
+
+const (
+ // AmzRegionSetKey represents the region set header used for sigv4a
+ AmzRegionSetKey = "X-Amz-Region-Set"
+ amzAlgorithmKey = v4Internal.AmzAlgorithmKey
+ amzSecurityTokenKey = v4Internal.AmzSecurityTokenKey
+ amzDateKey = v4Internal.AmzDateKey
+ amzCredentialKey = v4Internal.AmzCredentialKey
+ amzSignedHeadersKey = v4Internal.AmzSignedHeadersKey
+ authorizationHeader = "Authorization"
+
+ signingAlgorithm = "AWS4-ECDSA-P256-SHA256"
+
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // EmptyStringSHA256 is a hex encoded SHA-256 hash of an empty string
+ EmptyStringSHA256 = v4Internal.EmptyStringSHA256
+
+ // Version of signing v4a
+ Version = "SigV4A"
+)
+
+var (
+ p256 elliptic.Curve
+ nMinusTwoP256 *big.Int
+
+ one = new(big.Int).SetInt64(1)
+)
+
+func init() {
+ // Ensure the elliptic curve parameters are initialized on package import rather than on first usage
+ p256 = elliptic.P256()
+
+ nMinusTwoP256 = new(big.Int).SetBytes(p256.Params().N.Bytes())
+ nMinusTwoP256 = nMinusTwoP256.Sub(nMinusTwoP256, new(big.Int).SetInt64(2))
+}
+
+// SignerOptions is the SigV4a signing options for constructing a Signer.
+type SignerOptions struct {
+ Logger logging.Logger
+ LogSigning bool
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests to prevent headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need additional
+ // escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+}
+
+// Signer is a SigV4a HTTP signing implementation
+type Signer struct {
+ options SignerOptions
+}
+
+// NewSigner constructs a SigV4a Signer.
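+//
+// A minimal sketch:
+//
+//	signer := NewSigner(func(o *SignerOptions) {
+//		o.LogSigning = true
+//	})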
+func NewSigner(optFns ...func(*SignerOptions)) *Signer {
+ options := SignerOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &Signer{options: options}
+}
+
+// deriveKeyFromAccessKeyPair derives a NIST P-256 PrivateKey from the given
+// IAM AccessKey and SecretKey pair.
+//
+// Based on FIPS.186-4 Appendix B.4.2
+func deriveKeyFromAccessKeyPair(accessKey, secretKey string) (*ecdsa.PrivateKey, error) {
+ params := p256.Params()
+ bitLen := params.BitSize // Testing random candidates does not require an additional 64 bits
+ counter := 0x01
+
+ buffer := make([]byte, 1+len(accessKey)) // 1 byte counter + len(accessKey)
+ kdfContext := bytes.NewBuffer(buffer)
+
+ inputKey := append([]byte("AWS4A"), []byte(secretKey)...)
+
+ d := new(big.Int)
+ for {
+ kdfContext.Reset()
+ kdfContext.WriteString(accessKey)
+ kdfContext.WriteByte(byte(counter))
+
+ key, err := signerCrypto.HMACKeyDerivation(sha256.New, bitLen, inputKey, []byte(signingAlgorithm), kdfContext.Bytes())
+ if err != nil {
+ return nil, err
+ }
+
+ // Check the key first, before calling SetBytes, to confirm it is in fact a valid candidate.
+ // This ensures the byte slice is the correct length (32 bytes) to compare in constant time.
+ cmp, err := signerCrypto.ConstantTimeByteCompare(key, nMinusTwoP256.Bytes())
+ if err != nil {
+ return nil, err
+ }
+ if cmp == -1 {
+ d.SetBytes(key)
+ break
+ }
+
+ counter++
+ if counter > 0xFF {
+ return nil, fmt.Errorf("exhausted single byte external counter")
+ }
+ }
+ d = d.Add(d, one)
+
+ priv := new(ecdsa.PrivateKey)
+ priv.PublicKey.Curve = p256
+ priv.D = d
+ priv.PublicKey.X, priv.PublicKey.Y = p256.ScalarBaseMult(d.Bytes())
+
+ return priv, nil
+}
+
+type httpSigner struct {
+ Request *http.Request
+ ServiceName string
+ RegionSet []string
+ Time time.Time
+ Credentials Credentials
+ IsPreSign bool
+
+ Logger logging.Logger
+ Debug bool
+
+ // PayloadHash is the hex encoded SHA-256 hash of the request payload.
+ // If len(PayloadHash) == 0, the signer will attempt to send the request
+ // as an unsigned payload. Note: Unsigned payloads only work for a subset of services.
+ PayloadHash string
+
+ DisableHeaderHoisting bool
+ DisableURIPathEscaping bool
+}
+
+// SignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and signs using SigV4a.
+// The passed-in request will be modified in place.
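+//
+// A minimal sketch (the service name and region set shown are illustrative):
+//
+//	err := signer.SignHTTP(ctx, creds, req, EmptyStringSHA256,
+//		"s3", []string{"*"}, time.Now().UTC())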
+func (s *Signer) SignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) error {
+ options := s.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r,
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ RegionSet: regionSet,
+ Credentials: credentials,
+ Time: signingTime.UTC(),
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return err
+ }
+
+ logHTTPSigningInfo(ctx, options, signedRequest)
+
+ return nil
+}
+
+// PresignHTTP takes the provided http.Request, payload hash, service, regionSet, and time and presigns using SigV4a
+// Returns the presigned URL along with the headers that were signed with the request.
+//
+// PresignHTTP will not set the expires time of the presigned request
+// automatically. To specify the expire duration for a request add the
+// "X-Amz-Expires" query parameter on the request with the value as the
+// duration in seconds the presigned URL should be considered valid for. This
+// parameter is not used by all AWS services, and is most notably used by
+// Amazon S3 APIs.
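+//
+// For example, to mark a presigned request as valid for 15 minutes, set the
+// query parameter before presigning (a sketch):
+//
+//	q := req.URL.Query()
+//	q.Set("X-Amz-Expires", "900")
+//	req.URL.RawQuery = q.Encode()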
+func (s *Signer) PresignHTTP(ctx context.Context, credentials Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*SignerOptions)) (signedURI string, signedHeaders http.Header, err error) {
+ options := s.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ signer := &httpSigner{
+ Request: r,
+ PayloadHash: payloadHash,
+ ServiceName: service,
+ RegionSet: regionSet,
+ Credentials: credentials,
+ Time: signingTime.UTC(),
+ IsPreSign: true,
+ DisableHeaderHoisting: options.DisableHeaderHoisting,
+ DisableURIPathEscaping: options.DisableURIPathEscaping,
+ }
+
+ signedRequest, err := signer.Build()
+ if err != nil {
+ return "", nil, err
+ }
+
+ logHTTPSigningInfo(ctx, options, signedRequest)
+
+ signedHeaders = make(http.Header)
+
+ // For the signed headers we canonicalize the header keys in the returned map.
+ // This avoids situations where the standard library can double headers such as the Host header.
+ // For example, the standard library will set the Host header even if it is already present in lower-case form.
+ for k, v := range signedRequest.SignedHeaders {
+ key := textproto.CanonicalMIMEHeaderKey(k)
+ signedHeaders[key] = append(signedHeaders[key], v...)
+ }
+
+ return signedRequest.Request.URL.String(), signedHeaders, nil
+}
+
+func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) {
+ amzDate := s.Time.Format(timeFormat)
+
+ if s.IsPreSign {
+ query.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
+ query.Set(amzDateKey, amzDate)
+ query.Set(amzAlgorithmKey, signingAlgorithm)
+ if len(s.Credentials.SessionToken) > 0 {
+ query.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
+ }
+ return
+ }
+
+ headers.Set(AmzRegionSetKey, strings.Join(s.RegionSet, ","))
+ headers.Set(amzDateKey, amzDate)
+ if len(s.Credentials.SessionToken) > 0 {
+ headers.Set(amzSecurityTokenKey, s.Credentials.SessionToken)
+ }
+}
+
+func (s *httpSigner) Build() (signedRequest, error) {
+ req := s.Request
+
+ query := req.URL.Query()
+ headers := req.Header
+
+ s.setRequiredSigningFields(headers, query)
+
+ // Sort Each Query Key's Values
+ for key := range query {
+ sort.Strings(query[key])
+ }
+
+ v4Internal.SanitizeHostForHeader(req)
+
+ credentialScope := s.buildCredentialScope()
+ credentialStr := s.Credentials.Context + "/" + credentialScope
+ if s.IsPreSign {
+ query.Set(amzCredentialKey, credentialStr)
+ }
+
+ unsignedHeaders := headers
+ if s.IsPreSign && !s.DisableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, unsignedHeaders)
+ for k := range urlValues {
+ query[k] = urlValues[k]
+ }
+ }
+
+ host := req.URL.Host
+ if len(req.Host) > 0 {
+ host = req.Host
+ }
+
+ signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength)
+
+ if s.IsPreSign {
+ query.Set(amzSignedHeadersKey, signedHeadersStr)
+ }
+
+ rawQuery := strings.Replace(query.Encode(), "+", "%20", -1)
+
+ canonicalURI := v4Internal.GetURIPath(req.URL)
+ if !s.DisableURIPathEscaping {
+ canonicalURI = httpbinding.EscapePath(canonicalURI, false)
+ }
+
+ canonicalString := s.buildCanonicalString(
+ req.Method,
+ canonicalURI,
+ rawQuery,
+ signedHeadersStr,
+ canonicalHeaderStr,
+ )
+
+ strToSign := s.buildStringToSign(credentialScope, canonicalString)
+ signingSignature, err := s.buildSignature(strToSign)
+ if err != nil {
+ return signedRequest{}, err
+ }
+
+ if s.IsPreSign {
+ rawQuery += "&X-Amz-Signature=" + signingSignature
+ } else {
+ headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature))
+ }
+
+ req.URL.RawQuery = rawQuery
+
+ return signedRequest{
+ Request: req,
+ SignedHeaders: signedHeaders,
+ CanonicalString: canonicalString,
+ StringToSign: strToSign,
+ PreSigned: s.IsPreSign,
+ }, nil
+}
+
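+// buildAuthorizationHeader assembles the Authorization header value from its
+// parts; the result has the shape (values illustrative):
+//
+//	AWS4-ECDSA-P256-SHA256 Credential=AKID/20060102/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=<hex>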
+func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string {
+ const credential = "Credential="
+ const signedHeaders = "SignedHeaders="
+ const signature = "Signature="
+ const commaSpace = ", "
+
+ var parts strings.Builder
+ parts.Grow(len(signingAlgorithm) + 1 +
+ len(credential) + len(credentialStr) + len(commaSpace) +
+ len(signedHeaders) + len(signedHeadersStr) + len(commaSpace) +
+ len(signature) + len(signingSignature),
+ )
+ parts.WriteString(signingAlgorithm)
+ parts.WriteRune(' ')
+ parts.WriteString(credential)
+ parts.WriteString(credentialStr)
+ parts.WriteString(commaSpace)
+ parts.WriteString(signedHeaders)
+ parts.WriteString(signedHeadersStr)
+ parts.WriteString(commaSpace)
+ parts.WriteString(signature)
+ parts.WriteString(signingSignature)
+ return parts.String()
+}
+
+func (s *httpSigner) buildCredentialScope() string {
+ return strings.Join([]string{
+ s.Time.Format(shortTimeFormat),
+ s.ServiceName,
+ "aws4_request",
+ }, "/")
+}
+
+func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+
+func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) {
+ signed = make(http.Header)
+
+ var headers []string
+ const hostHeader = "host"
+ headers = append(headers, hostHeader)
+ signed[hostHeader] = append(signed[hostHeader], host)
+
+ if length > 0 {
+ const contentLengthHeader = "content-length"
+ headers = append(headers, contentLengthHeader)
+ signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
+ }
+
+ for k, v := range header {
+ if !rule.IsValid(k) {
+ continue // ignored header
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := signed[lowerCaseKey]; ok {
+ // include additional values
+ signed[lowerCaseKey] = append(signed[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ signed[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ signedHeaders = strings.Join(headers, ";")
+
+ var canonicalHeaders strings.Builder
+ n := len(headers)
+ const colon = ':'
+ for i := 0; i < n; i++ {
+ if headers[i] == hostHeader {
+ canonicalHeaders.WriteString(hostHeader)
+ canonicalHeaders.WriteRune(colon)
+ canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host))
+ } else {
+ canonicalHeaders.WriteString(headers[i])
+ canonicalHeaders.WriteRune(colon)
+ // Trim out leading, trailing, and dedup inner spaces from signed header values.
+ values := signed[headers[i]]
+ for j, v := range values {
+ cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
+ canonicalHeaders.WriteString(cleanedValue)
+ if j < len(values)-1 {
+ canonicalHeaders.WriteRune(',')
+ }
+ }
+ }
+ canonicalHeaders.WriteRune('\n')
+ }
+ canonicalHeadersStr = canonicalHeaders.String()
+
+ return signed, signedHeaders, canonicalHeadersStr
+}
+
+func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string {
+ return strings.Join([]string{
+ method,
+ uri,
+ query,
+ canonicalHeaders,
+ signedHeaders,
+ s.PayloadHash,
+ }, "\n")
+}
+
+func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string {
+ return strings.Join([]string{
+ signingAlgorithm,
+ s.Time.Format(timeFormat),
+ credentialScope,
+ hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))),
+ }, "\n")
+}
+
+func makeHash(hash hash.Hash, b []byte) []byte {
+ hash.Reset()
+ hash.Write(b)
+ return hash.Sum(nil)
+}
+
+func (s *httpSigner) buildSignature(strToSign string) (string, error) {
+ sig, err := s.Credentials.PrivateKey.Sign(rand.Reader, makeHash(sha256.New(), []byte(strToSign)), crypto.SHA256)
+ if err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(sig), nil
+}
+
+const logSignInfoMsg = `Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func logHTTPSigningInfo(ctx context.Context, options SignerOptions, r signedRequest) {
+ if !options.LogSigning {
+ return
+ }
+ signedURLMsg := ""
+ if r.PreSigned {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, r.Request.URL.String())
+ }
+ logger := logging.WithContext(ctx, options.Logger)
+ logger.Logf(logging.Debug, logSignInfoMsg, r.CanonicalString, r.StringToSign, signedURLMsg)
+}
+
+type signedRequest struct {
+ Request *http.Request
+ SignedHeaders http.Header
+ CanonicalString string
+ StringToSign string
+ PreSigned bool
+}
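+
+// exampleStringToSign is an editorial sketch (not part of the vendored file)
+// showing how a canonical request and string-to-sign compose in this signer.
+// The timestamp, scope, and header values below are hypothetical
+// placeholders; the signer above derives the real ones from the request,
+// clock, and credentials.
+func exampleStringToSign() string {
+	// Canonical request: method, URI, raw query, canonical headers (each
+	// terminated by \n), signed-header list, and payload hash.
+	canonical := strings.Join([]string{
+		"GET",
+		"/bucket/key",
+		"", // no query string
+		"host:s3.example.com\n",
+		"host",
+		hex.EncodeToString(makeHash(sha256.New(), nil)), // empty payload hash
+	}, "\n")
+
+	// String to sign: algorithm, timestamp, credential scope, and the
+	// hex-encoded SHA-256 of the canonical request.
+	return strings.Join([]string{
+		signingAlgorithm, // defined elsewhere in this package
+		"20250101T000000Z",
+		"20250101/s3/aws4_request",
+		hex.EncodeToString(makeHash(sha256.New(), []byte(canonical))),
+	}, "\n")
+}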
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
new file mode 100644
index 000000000..c05f82ea4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
@@ -0,0 +1,184 @@
+# v1.13.3 (2025-11-04)
+
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.13.2 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+
+# v1.13.1 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+
+# v1.13.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+
+# v1.12.4 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+
+# v1.12.3 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+
+# v1.12.2 (2025-01-24)
+
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.12.1 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+
+# v1.12.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+
+# v1.11.5 (2024-09-20)
+
+* No change notes available for this release.
+
+# v1.11.4 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+
+# v1.11.3 (2024-06-28)
+
+* No change notes available for this release.
+
+# v1.11.2 (2024-03-29)
+
+* No change notes available for this release.
+
+# v1.11.1 (2024-02-21)
+
+* No change notes available for this release.
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.10.4 (2023-12-07)
+
+* No change notes available for this release.
+
+# v1.10.3 (2023-11-30)
+
+* No change notes available for this release.
+
+# v1.10.2 (2023-11-29)
+
+* No change notes available for this release.
+
+# v1.10.1 (2023-11-15)
+
+* No change notes available for this release.
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+
+# v1.9.15 (2023-10-06)
+
+* No change notes available for this release.
+
+# v1.9.14 (2023-08-18)
+
+* No change notes available for this release.
+
+# v1.9.13 (2023-08-07)
+
+* No change notes available for this release.
+
+# v1.9.12 (2023-07-31)
+
+* No change notes available for this release.
+
+# v1.9.11 (2022-12-02)
+
+* No change notes available for this release.
+
+# v1.9.10 (2022-10-24)
+
+* No change notes available for this release.
+
+# v1.9.9 (2022-09-14)
+
+* No change notes available for this release.
+
+# v1.9.8 (2022-09-02)
+
+* No change notes available for this release.
+
+# v1.9.7 (2022-08-31)
+
+* No change notes available for this release.
+
+# v1.9.6 (2022-08-29)
+
+* No change notes available for this release.
+
+# v1.9.5 (2022-08-11)
+
+* No change notes available for this release.
+
+# v1.9.4 (2022-08-09)
+
+* No change notes available for this release.
+
+# v1.9.3 (2022-06-29)
+
+* No change notes available for this release.
+
+# v1.9.2 (2022-06-07)
+
+* No change notes available for this release.
+
+# v1.9.1 (2022-03-24)
+
+* No change notes available for this release.
+
+# v1.9.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.8.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.7.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.6.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.5.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.4.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+
+# v1.3.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+
+# v1.2.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
new file mode 100644
index 000000000..3f451fc9b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
@@ -0,0 +1,176 @@
+package acceptencoding
+
+import (
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const acceptEncodingHeaderKey = "Accept-Encoding"
+const contentEncodingHeaderKey = "Content-Encoding"
+
+// AddAcceptEncodingGzipOptions provides the options for the
+// AddAcceptEncodingGzip middleware setup.
+type AddAcceptEncodingGzipOptions struct {
+ Enable bool
+}
+
+// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP
+// middleware to the operation stack. This allows checksums to be correctly
+// computed without disabling GZIP support.
+func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error {
+ if options.Enable {
+ if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ return stack.Finalize.Add(&DisableGzip{}, middleware.Before)
+}
+
+// DisableGzip provides the middleware that prevents the underlying http
+// client from automatically enabling gzip content-encoding and
+// decompressing responses.
+type DisableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DisableGzip) ID() string {
+ return "DisableAcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*DisableGzip) HandleFinalize(
+ ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, &smithy.SerializationError{
+ Err: fmt.Errorf("unknown request type %T", input.Request),
+ }
+ }
+
+	// Explicitly request the identity encoding so the http client does not
+	// negotiate gzip or auto-extract compressed content.
+ req.Header.Set(acceptEncodingHeaderKey, "identity")
+
+ return next.HandleFinalize(ctx, input)
+}
+
+// EnableGzip provides a middleware to enable support for
+// gzip responses, with manual decompression. This prevents the underlying HTTP
+// client from performing the gzip decompression automatically.
+type EnableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*EnableGzip) ID() string {
+ return "AcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*EnableGzip) HandleFinalize(
+ ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, &smithy.SerializationError{
+ Err: fmt.Errorf("unknown request type %T", input.Request),
+ }
+ }
+
+	// Explicitly request gzip. Because the header is set manually, the http
+	// client will not auto-extract the compressed content; the DecompressGzip
+	// middleware handles decompression instead.
+ req.Header.Set(acceptEncodingHeaderKey, "gzip")
+
+ return next.HandleFinalize(ctx, input)
+}
+
+// DecompressGzip provides the middleware for decompressing a gzip
+// response from the service.
+type DecompressGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DecompressGzip) ID() string {
+ return "DecompressGzip"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface.
+func (*DecompressGzip) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ output, metadata, err = next.HandleDeserialize(ctx, input)
+ if err != nil {
+ return output, metadata, err
+ }
+
+ resp, ok := output.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return output, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("unknown response type %T", output.RawResponse),
+ }
+ }
+ if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" {
+ return output, metadata, err
+ }
+
+ // Clear content length since it will no longer be valid once the response
+ // body is decompressed.
+ resp.Header.Del("Content-Length")
+ resp.ContentLength = -1
+
+ resp.Body = wrapGzipReader(resp.Body)
+
+ return output, metadata, err
+}
+
+type gzipReader struct {
+ reader io.ReadCloser
+ gzip *gzip.Reader
+}
+
+func wrapGzipReader(reader io.ReadCloser) *gzipReader {
+ return &gzipReader{
+ reader: reader,
+ }
+}
+
+// Read wraps the gzip reader around the underlying io.Reader to extract the
+// response bytes on the fly.
+func (g *gzipReader) Read(b []byte) (n int, err error) {
+ if g.gzip == nil {
+ g.gzip, err = gzip.NewReader(g.reader)
+ if err != nil {
+ g.gzip = nil // ensure uninitialized gzip value isn't used in close.
+ return 0, fmt.Errorf("failed to decompress gzip response, %w", err)
+ }
+ }
+
+ return g.gzip.Read(b)
+}
+
+func (g *gzipReader) Close() error {
+ if g.gzip == nil {
+ return nil
+ }
+
+ if err := g.gzip.Close(); err != nil {
+ g.reader.Close()
+ return fmt.Errorf("failed to decompress gzip response, %w", err)
+ }
+
+ return g.reader.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
new file mode 100644
index 000000000..7056d9bf6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
@@ -0,0 +1,22 @@
+/*
+Package acceptencoding provides customizations associated with Accept Encoding Header.
+
+# Accept encoding gzip
+
+The Go HTTP client automatically supports accept-encoding and content-encoding
+gzip by default. This default behavior is not desired by the SDK because it
+prevents validating the response body's checksum. To avoid this, the SDK must
+manually control its use of content-encoding gzip.
+
+To control content-encoding, the SDK always sets the `Accept-Encoding` header
+to an explicit value, which prevents the HTTP client from negotiating gzip
+automatically. When gzip is enabled on the API client, the SDK's customization
+controls decompression of the gzip data so that checksum validation is not
+broken. When gzip is disabled, the API client requests the identity encoding,
+suppressing the HTTP client's default behavior.
+
+An `EnableAcceptEncodingGzip` option may or may not be present, depending on
+the client using the middleware below. When present, the option enables
+automatic gzip decompression by the SDK.
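+
+As an illustration only (this package is internal to the SDK module, and
+generated clients normally wire it up), enabling the customization on a
+smithy-go middleware stack might look like:
+
+	err := acceptencoding.AddAcceptEncodingGzip(stack, acceptencoding.AddAcceptEncodingGzipOptions{
+		Enable: true, // installs EnableGzip and DecompressGzip
+	})
+
+With Enable set to false, DisableGzip is installed instead, pinning
+`Accept-Encoding: identity`. Note that DecompressGzip inserts itself relative
+to an "OperationDeserializer" step, which the hosting client is assumed to
+register; "stack" above is likewise an assumed *middleware.Stack.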
+*/
+package acceptencoding
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
new file mode 100644
index 000000000..6a4c33605
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package acceptencoding
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
new file mode 100644
index 000000000..af4dcdc14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
@@ -0,0 +1,382 @@
+# v1.7.0 (2025-03-11)
+
+* **Feature**: Add extra check during output checksum validation so the validation skip warning would not be logged if object is not fetched from s3
+
+# v1.6.2 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2025-02-10)
+
+* **Feature**: Support CRC64NVME flex checksums.
+
+# v1.5.6 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.5 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.4 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.3 (2025-01-24)
+
+* **Bug Fix**: Enable request checksum validation mode by default
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.5.2 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.5.1 (2025-01-16)
+
+* **Bug Fix**: Fix nil dereference panic for operations that require checksums, but do not have an input setting for which algorithm to use.
+
+# v1.5.0 (2025-01-15)
+
+* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION.
+* **Dependency Update**: Updated to the latest SDK module versions
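+
+As a hedged illustration of the in-code option named above (the helper and
+constant names are assumptions that may vary by SDK version):
+
+```go
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+)
+
+// Compute request checksums only when the operation requires them,
+// instead of the new when_supported default.
+cfg, err := config.LoadDefaultConfig(context.Background(),
+	config.WithRequestChecksumCalculation(aws.RequestChecksumCalculationWhenRequired),
+)
+```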
+
+# v1.4.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.4 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.38 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.37 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.36 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.35 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.34 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.33 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.32 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.31 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.30 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.29 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.28 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.27 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.26 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.25 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.24 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.23 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.22 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.21 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.20 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.19 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.18 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.17 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.16 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.15 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.14 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.7 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.6 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.5 (2022-04-27)
+
+* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
+
+# v1.1.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2022-03-08)
+
+* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2022-02-24)
+
+* **Release**: New module for computing checksums
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go
new file mode 100644
index 000000000..dab97fb22
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go
@@ -0,0 +1,335 @@
+package checksum
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "hash/crc64"
+ "io"
+ "strings"
+ "sync"
+)
+
+// Algorithm represents the checksum algorithms supported
+type Algorithm string
+
+// Enumeration values for supported checksum Algorithms.
+const (
+ // AlgorithmCRC32C represents CRC32C hash algorithm
+ AlgorithmCRC32C Algorithm = "CRC32C"
+
+ // AlgorithmCRC32 represents CRC32 hash algorithm
+ AlgorithmCRC32 Algorithm = "CRC32"
+
+ // AlgorithmSHA1 represents SHA1 hash algorithm
+ AlgorithmSHA1 Algorithm = "SHA1"
+
+ // AlgorithmSHA256 represents SHA256 hash algorithm
+ AlgorithmSHA256 Algorithm = "SHA256"
+
+ // AlgorithmCRC64NVME represents CRC64NVME hash algorithm
+ AlgorithmCRC64NVME Algorithm = "CRC64NVME"
+)
+
+// inverted NVME polynomial as required by crc64.MakeTable
+const crc64NVME = 0x9a6c_9329_ac4b_c9b5
+
+var supportedAlgorithms = []Algorithm{
+ AlgorithmCRC32C,
+ AlgorithmCRC32,
+ AlgorithmSHA1,
+ AlgorithmSHA256,
+ AlgorithmCRC64NVME,
+}
+
+func (a Algorithm) String() string { return string(a) }
+
+// ParseAlgorithm attempts to parse the provided value into a checksum
+// algorithm, matching without case. Returns the algorithm matched, or an error
+// if the algorithm wasn't matched.
+func ParseAlgorithm(v string) (Algorithm, error) {
+ for _, a := range supportedAlgorithms {
+ if strings.EqualFold(string(a), v) {
+ return a, nil
+ }
+ }
+ return "", fmt.Errorf("unknown checksum algorithm, %v", v)
+}
+
+// FilterSupportedAlgorithms filters the set of algorithms, returning a slice
+// of algorithms that are supported.
+func FilterSupportedAlgorithms(vs []string) []Algorithm {
+ found := map[Algorithm]struct{}{}
+
+ supported := make([]Algorithm, 0, len(supportedAlgorithms))
+ for _, v := range vs {
+ for _, a := range supportedAlgorithms {
+ // Only consider algorithms that are supported
+ if !strings.EqualFold(v, string(a)) {
+ continue
+ }
+ // Ignore duplicate algorithms in list.
+ if _, ok := found[a]; ok {
+ continue
+ }
+
+ supported = append(supported, a)
+ found[a] = struct{}{}
+ }
+ }
+ return supported
+}
+
+// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is
+// returned if the algorithm is unknown.
+func NewAlgorithmHash(v Algorithm) (hash.Hash, error) {
+ switch v {
+ case AlgorithmSHA1:
+ return sha1.New(), nil
+ case AlgorithmSHA256:
+ return sha256.New(), nil
+ case AlgorithmCRC32:
+ return crc32.NewIEEE(), nil
+ case AlgorithmCRC32C:
+ return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
+ case AlgorithmCRC64NVME:
+ return crc64.New(crc64.MakeTable(crc64NVME)), nil
+ default:
+ return nil, fmt.Errorf("unknown checksum algorithm, %v", v)
+ }
+}
+
+// AlgorithmChecksumLength returns the length of the algorithm's checksum in
+// bytes. If the algorithm is not known, an error is returned.
+func AlgorithmChecksumLength(v Algorithm) (int, error) {
+ switch v {
+ case AlgorithmSHA1:
+ return sha1.Size, nil
+ case AlgorithmSHA256:
+ return sha256.Size, nil
+ case AlgorithmCRC32:
+ return crc32.Size, nil
+ case AlgorithmCRC32C:
+ return crc32.Size, nil
+ case AlgorithmCRC64NVME:
+ return crc64.Size, nil
+ default:
+ return 0, fmt.Errorf("unknown checksum algorithm, %v", v)
+ }
+}
+
+const awsChecksumHeaderPrefix = "x-amz-checksum-"
+
+// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash.
+func AlgorithmHTTPHeader(v Algorithm) string {
+ return awsChecksumHeaderPrefix + strings.ToLower(string(v))
+}
+
+// base64EncodeHashSum computes the base64 encoded checksum of a given running
+// hash. The running hash must already have content written to it. Returns the
+// byte slice of the base64 encoded checksum.
+func base64EncodeHashSum(h hash.Hash) []byte {
+ sum := h.Sum(nil)
+ sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+ base64.StdEncoding.Encode(sum64, sum)
+ return sum64
+}
+
+// hexEncodeHashSum computes the hex encoded checksum of a given running hash.
+// The running hash must already have content written to it. Returns the byte
+// slice of the hex encoded checksum.
+func hexEncodeHashSum(h hash.Hash) []byte {
+ sum := h.Sum(nil)
+ sumHex := make([]byte, hex.EncodedLen(len(sum)))
+ hex.Encode(sumHex, sum)
+ return sumHex
+}
+
+// computeMD5Checksum computes base64 MD5 checksum of an io.Reader's contents.
+// Returns the byte slice of MD5 checksum and an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+ h := md5.New()
+
+ // Copy errors may be assumed to be from the body.
+ if _, err := io.Copy(h, r); err != nil {
+		return nil, fmt.Errorf("failed to compute MD5 hash of reader, %w", err)
+ }
+
+ // Encode the MD5 checksum in base64.
+ return base64EncodeHashSum(h), nil
+}
+
+// computeChecksumReader provides a reader wrapping an underlying io.Reader to
+// compute the checksum of the stream's bytes.
+type computeChecksumReader struct {
+ stream io.Reader
+ algorithm Algorithm
+ hasher hash.Hash
+ base64ChecksumLen int
+
+ mux sync.RWMutex
+ lockedChecksum string
+ lockedErr error
+}
+
+// newComputeChecksumReader returns a computeChecksumReader for the stream and
+// algorithm specified. Returns error if unable to create the reader, or
+// algorithm is unknown.
+func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
+ hasher, err := NewAlgorithmHash(algorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ checksumLength, err := AlgorithmChecksumLength(algorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ return &computeChecksumReader{
+ stream: io.TeeReader(stream, hasher),
+ algorithm: algorithm,
+ hasher: hasher,
+ base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength),
+ }, nil
+}
+
+// Read wraps the underlying reader. When the underlying reader returns EOF,
+// the checksum of the reader will be computed, and can be retrieved with
+// ChecksumBase64String.
+func (r *computeChecksumReader) Read(p []byte) (int, error) {
+ n, err := r.stream.Read(p)
+ if err == nil {
+ return n, nil
+ } else if err != io.EOF {
+ r.mux.Lock()
+ defer r.mux.Unlock()
+
+ r.lockedErr = err
+ return n, err
+ }
+
+ b := base64EncodeHashSum(r.hasher)
+
+ r.mux.Lock()
+ defer r.mux.Unlock()
+
+ r.lockedChecksum = string(b)
+
+ return n, err
+}
+
+func (r *computeChecksumReader) Algorithm() Algorithm {
+ return r.algorithm
+}
+
+// Base64ChecksumLength returns the base64 encoded length of the checksum for
+// algorithm.
+func (r *computeChecksumReader) Base64ChecksumLength() int {
+ return r.base64ChecksumLen
+}
+
+// Base64Checksum returns the base64 checksum for the algorithm, or error if
+// the underlying reader returned a non-EOF error.
+//
+// Safe to call concurrently, but it will return an error until after the
+// underlying reader returns EOF.
+func (r *computeChecksumReader) Base64Checksum() (string, error) {
+ r.mux.RLock()
+ defer r.mux.RUnlock()
+
+ if r.lockedErr != nil {
+ return "", r.lockedErr
+ }
+
+ if r.lockedChecksum == "" {
+ return "", fmt.Errorf(
+ "checksum not available yet, called before reader returns EOF",
+ )
+ }
+
+ return r.lockedChecksum, nil
+}
+
+// validateChecksumReader implements io.ReadCloser interface. The wrapper
+// performs checksum validation when the underlying reader has been fully read.
+type validateChecksumReader struct {
+ originalBody io.ReadCloser
+ body io.Reader
+ hasher hash.Hash
+ algorithm Algorithm
+ expectChecksum string
+}
+
+// newValidateChecksumReader returns a configured io.ReadCloser that performs
+// checksum validation when the underlying reader has been fully read.
+func newValidateChecksumReader(
+ body io.ReadCloser,
+ algorithm Algorithm,
+ expectChecksum string,
+) (*validateChecksumReader, error) {
+ hasher, err := NewAlgorithmHash(algorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ return &validateChecksumReader{
+ originalBody: body,
+ body: io.TeeReader(body, hasher),
+ hasher: hasher,
+ algorithm: algorithm,
+ expectChecksum: expectChecksum,
+ }, nil
+}
+
+// Read attempts to read from the underlying stream while also updating the
+// running hash. If the underlying stream returns with an EOF error, the
+// checksum of the stream will be collected, and compared against the expected
+// checksum. If the checksums do not match, an error will be returned.
+//
+// If a non-EOF error occurs when reading the underlying stream, that error
+// will be returned and the checksum for the stream will be discarded.
+func (c *validateChecksumReader) Read(p []byte) (n int, err error) {
+ n, err = c.body.Read(p)
+ if err == io.EOF {
+ if checksumErr := c.validateChecksum(); checksumErr != nil {
+ return n, checksumErr
+ }
+ }
+
+ return n, err
+}
+
+// Close closes the underlying reader, returning any error that occurred in the
+// underlying reader.
+func (c *validateChecksumReader) Close() (err error) {
+ return c.originalBody.Close()
+}
+
+func (c *validateChecksumReader) validateChecksum() error {
+ // Compute base64 encoded checksum hash of the payload's read bytes.
+ v := base64EncodeHashSum(c.hasher)
+ if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) {
+ return validationError{
+ Algorithm: c.algorithm, Expect: e, Actual: a,
+ }
+ }
+
+ return nil
+}
+
+type validationError struct {
+ Algorithm Algorithm
+ Expect string
+ Actual string
+}
+
+func (v validationError) Error() string {
+ return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v",
+ v.Algorithm, v.Expect, v.Actual)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go
new file mode 100644
index 000000000..3bd320c43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/aws_chunked_encoding.go
@@ -0,0 +1,389 @@
+package checksum
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const (
+ crlf = "\r\n"
+
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
+ defaultChunkLength = 1024 * 64
+
+ awsTrailerHeaderName = "x-amz-trailer"
+ decodedContentLengthHeaderName = "x-amz-decoded-content-length"
+
+ contentEncodingHeaderName = "content-encoding"
+ awsChunkedContentEncodingHeaderValue = "aws-chunked"
+
+ trailerKeyValueSeparator = ":"
+)
+
+var (
+ crlfBytes = []byte(crlf)
+ finalChunkBytes = []byte("0" + crlf)
+)
+
+type awsChunkedEncodingOptions struct {
+ // The total size of the stream. For unsigned encoding this implies that
+ // there will only be a single chunk containing the underlying payload,
+ // unless ChunkLength is also specified.
+ StreamLength int64
+
+ // Set of trailer key:value pairs that will be appended to the end of the
+ // payload after the end chunk has been written.
+ Trailers map[string]awsChunkedTrailerValue
+
+	// The maximum size of each chunk to be sent. A default value of -1 signals
+	// that an optimal chunk length will be chosen automatically. ChunkLength
+	// must be at least 8KB.
+ //
+ // If ChunkLength and StreamLength are both specified, the stream will be
+ // broken up into ChunkLength chunks. The encoded length of the aws-chunked
+ // encoding can still be determined as long as all trailers, if any, have a
+ // fixed length.
+ ChunkLength int
+}
+
+type awsChunkedTrailerValue struct {
+ // Function to retrieve the value of the trailer. Will only be called after
+ // the underlying stream returns EOF error.
+ Get func() (string, error)
+
+	// If the length of the value can be pre-determined and is constant,
+ // specify the length. A value of -1 means the length is unknown, or
+ // cannot be pre-determined.
+ Length int
+}
+
+// awsChunkedEncoding provides a reader that wraps the payload such that the
+// payload is read as a single aws-chunked payload. This reader can only be
+// used if the content length of the payload is known. Content-Length is used
+// as the size of the single payload chunk. The final chunk and trailing
+// checksum are appended at the end.
+//
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
+//
+// Here is the aws-chunked payload stream as read from the awsChunkedEncoding
+// if original request stream is "Hello world", and checksum hash used is SHA256
+//
+// b\r\n
+// Hello world\r\n
+// 0\r\n
+// x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n
+// \r\n
+type awsChunkedEncoding struct {
+ options awsChunkedEncodingOptions
+
+ encodedStream io.Reader
+ trailerEncodedLength int
+}
+
+// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured
+// for unsigned aws-chunked content encoding. Any additional trailers that need
+// to be appended after the end chunk must be included via Trailer callbacks.
+func newUnsignedAWSChunkedEncoding(
+ stream io.Reader,
+ optFns ...func(*awsChunkedEncodingOptions),
+) *awsChunkedEncoding {
+ options := awsChunkedEncodingOptions{
+ Trailers: map[string]awsChunkedTrailerValue{},
+ StreamLength: -1,
+ ChunkLength: -1,
+ }
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ var chunkReader io.Reader
+ if options.ChunkLength != -1 || options.StreamLength == -1 {
+ if options.ChunkLength == -1 {
+ options.ChunkLength = defaultChunkLength
+ }
+ chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength)
+ } else {
+ chunkReader = newUnsignedChunkReader(stream, options.StreamLength)
+ }
+
+ trailerReader := newAWSChunkedTrailerReader(options.Trailers)
+
+ return &awsChunkedEncoding{
+ options: options,
+ encodedStream: io.MultiReader(chunkReader,
+ trailerReader,
+ bytes.NewBuffer(crlfBytes),
+ ),
+ trailerEncodedLength: trailerReader.EncodedLength(),
+ }
+}
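+
+// Usage sketch (illustrative; getChecksum and size are assumptions): register
+// the checksum as a lazy trailer that resolves once the wrapped stream hits
+// EOF, mirroring how the trailing-checksum middleware wires this up.
+//
+//	enc := newUnsignedAWSChunkedEncoding(payload, func(o *awsChunkedEncodingOptions) {
+//		o.StreamLength = size
+//		o.Trailers["x-amz-checksum-sha256"] = awsChunkedTrailerValue{
+//			Get:    getChecksum, // func() (string, error), valid after EOF
+//			Length: 44,          // base64 length of a 32-byte SHA256 digest
+//		}
+//	})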
+
+// EncodedLength returns the final length of the aws-chunked content encoded
+// stream if it can be determined without reading the underlying stream or lazy
+// header values, otherwise -1 is returned.
+func (e *awsChunkedEncoding) EncodedLength() int64 {
+ var length int64
+ if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 {
+ return -1
+ }
+
+ if e.options.StreamLength != 0 {
+ // If the stream length is known, and there is no chunk length specified,
+ // only a single chunk will be used. Otherwise the stream length needs to
+ // include the multiple chunk padding content.
+ if e.options.ChunkLength == -1 {
+ length += getUnsignedChunkBytesLength(e.options.StreamLength)
+
+ } else {
+ // Compute chunk header and payload length
+ numChunks := e.options.StreamLength / int64(e.options.ChunkLength)
+ length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength))
+ if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 {
+ length += getUnsignedChunkBytesLength(remainder)
+ }
+ }
+ }
+
+ // End chunk
+ length += int64(len(finalChunkBytes))
+
+ // Trailers
+ length += int64(e.trailerEncodedLength)
+
+ // Encoding terminator
+ length += int64(len(crlf))
+
+ return length
+}
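+
+// Worked example (illustrative): StreamLength 11 ("Hello world"), no
+// ChunkLength, no trailers: 16 bytes for the single chunk ("b\r\n" + payload
+// + "\r\n") + 3 for the end chunk ("0\r\n") + 2 for the terminating "\r\n"
+// = 21 bytes.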
+
+func getUnsignedChunkBytesLength(payloadLength int64) int64 {
+ payloadLengthStr := strconv.FormatInt(payloadLength, 16)
+ return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf))
+}
+
+// HTTPHeaders returns the set of headers that must be included in the request
+// for aws-chunked to work, including the content-encoding: aws-chunked header.
+//
+// If there are multiple layered content encodings, the aws-chunked encoding
+// must be appended after the stream's previous encoding layers. The best way
+// to do this is to append all returned header values to the HTTP request's
+// existing set of headers.
+func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
+ headers := map[string][]string{
+ contentEncodingHeaderName: {
+ awsChunkedContentEncodingHeaderValue,
+ },
+ }
+
+ if len(e.options.Trailers) != 0 {
+ trailers := make([]string, 0, len(e.options.Trailers))
+ for name := range e.options.Trailers {
+ trailers = append(trailers, strings.ToLower(name))
+ }
+ headers[awsTrailerHeaderName] = trailers
+ }
+
+ return headers
+}
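+
+// For a single SHA256 trailer the returned map looks like (illustrative):
+//
+//	map[string][]string{
+//		"content-encoding": {"aws-chunked"},
+//		"x-amz-trailer":    {"x-amz-checksum-sha256"},
+//	}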
+
+func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
+ return e.encodedStream.Read(b)
+}
+
+// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked
+// content encoded trailers. The trailer values will not be retrieved until the
+// reader is read from.
+type awsChunkedTrailerReader struct {
+ reader *bytes.Buffer
+ trailers map[string]awsChunkedTrailerValue
+ trailerEncodedLength int
+}
+
+// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader
+// for lazily reading aws-chunked content encoded trailers.
+func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
+ return &awsChunkedTrailerReader{
+ trailers: trailers,
+ trailerEncodedLength: trailerEncodedLength(trailers),
+ }
+}
+
+func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
+ for name, trailer := range trailers {
+ length += len(name) + len(trailerKeyValueSeparator)
+ l := trailer.Length
+ if l == -1 {
+ return -1
+ }
+ length += l + len(crlf)
+ }
+
+ return length
+}
+
+// EncodedLength returns the length of the encoded trailers if the length could
+// be determined without retrieving the header values. Returns -1 if length is
+// unknown.
+func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
+ return r.trailerEncodedLength
+}
+
+// Read populates the passed in byte slice with bytes from the encoded
+// trailers. Will lazily read header values the first time Read is called.
+func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
+ if r.trailerEncodedLength == 0 {
+ return 0, io.EOF
+ }
+
+ if r.reader == nil {
+ trailerLen := r.trailerEncodedLength
+ if r.trailerEncodedLength == -1 {
+ trailerLen = 0
+ }
+ r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
+ for name, trailer := range r.trailers {
+ r.reader.WriteString(name)
+ r.reader.WriteString(trailerKeyValueSeparator)
+ v, err := trailer.Get()
+ if err != nil {
+ return 0, fmt.Errorf("failed to get trailer value, %w", err)
+ }
+ r.reader.WriteString(v)
+ r.reader.WriteString(crlf)
+ }
+ }
+
+ return r.reader.Read(p)
+}
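+
+// An encoded SHA256 trailer reads as a single key:value line (illustrative,
+// digest taken from the aws-chunked example above):
+//
+//	x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n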
+
+// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
+// as unsigned aws-chunked chunks. The returned reader will also include the
+// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
+// added.
+//
+// If the payload size is -1 for unknown length the content will be buffered in
+// defaultChunkLength chunks before wrapped in aws-chunked chunk encoding.
+func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader {
+ if payloadSize == -1 {
+ return newBufferedAWSChunkReader(reader, defaultChunkLength)
+ }
+
+ var endChunk bytes.Buffer
+ if payloadSize == 0 {
+ endChunk.Write(finalChunkBytes)
+ return &endChunk
+ }
+
+ endChunk.WriteString(crlf)
+ endChunk.Write(finalChunkBytes)
+
+ var header bytes.Buffer
+ header.WriteString(strconv.FormatInt(payloadSize, 16))
+ header.WriteString(crlf)
+ return io.MultiReader(
+ &header,
+ reader,
+ &endChunk,
+ )
+}
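+
+// For payloadSize 11 and payload "Hello world" the returned reader yields, in
+// order (illustrative): "b\r\n" + "Hello world" + "\r\n" + "0\r\n".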
+
+// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader.
+// Will include end chunk, but not the aws-chunked final `crlf` segment so
+// trailers can be added.
+//
+// Note does not implement support for chunk extensions, e.g. chunk signing.
+type bufferedAWSChunkReader struct {
+ reader io.Reader
+ chunkSize int
+ chunkSizeStr string
+
+ headerBuffer *bytes.Buffer
+ chunkBuffer *bytes.Buffer
+
+ multiReader io.Reader
+ multiReaderLen int
+ endChunkDone bool
+}
+
+// newBufferedAWSChunkReader returns a bufferedAWSChunkReader for reading
+// aws-chunked encoded chunks.
+func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader {
+ return &bufferedAWSChunkReader{
+ reader: reader,
+ chunkSize: chunkSize,
+ chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16),
+
+ headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)),
+ chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))),
+ }
+}
+
+// Read attempts to read from the underlying io.Reader, writing aws-chunked
+// chunk encoded bytes to p. When the underlying io.Reader has been completely
+// read, the end chunk will be available. Once the end chunk is read, the
+// reader will return EOF.
+func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) {
+ if r.multiReaderLen == 0 && r.endChunkDone {
+ return 0, io.EOF
+ }
+ if r.multiReader == nil || r.multiReaderLen == 0 {
+ r.multiReader, r.multiReaderLen, err = r.newMultiReader()
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ n, err = r.multiReader.Read(p)
+ r.multiReaderLen -= n
+
+ if err == io.EOF && !r.endChunkDone {
+ // Edge case handling when the multi-reader has been completely read,
+ // and returned an EOF, make sure that EOF only gets returned if the
+ // end chunk was included in the multi-reader. Otherwise, the next call
+ // to read will initialize the next chunk's multi-reader.
+ err = nil
+ }
+ return n, err
+}
+
+// newMultiReader returns a new io.Reader for wrapping the next chunk. Will
+// return an error if the underlying reader cannot be read from. Will never
+// return io.EOF.
+func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) {
+ // io.Copy eats the io.EOF returned by io.LimitReader. Any error that
+ // occurs here is due to an actual read error.
+ n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize)))
+ if err != nil {
+ return nil, 0, err
+ }
+ if n == 0 {
+ // Early exit writing out only the end chunk. This does not include
+ // aws-chunk's final `crlf` so that trailers can still be added by
+ // upstream reader.
+ r.headerBuffer.Reset()
+ r.headerBuffer.WriteString("0")
+ r.headerBuffer.WriteString(crlf)
+ r.endChunkDone = true
+
+ return r.headerBuffer, r.headerBuffer.Len(), nil
+ }
+ r.chunkBuffer.WriteString(crlf)
+
+ chunkSizeStr := r.chunkSizeStr
+ if int(n) != r.chunkSize {
+ chunkSizeStr = strconv.FormatInt(n, 16)
+ }
+
+ r.headerBuffer.Reset()
+ r.headerBuffer.WriteString(chunkSizeStr)
+ r.headerBuffer.WriteString(crlf)
+
+ return io.MultiReader(
+ r.headerBuffer,
+ r.chunkBuffer,
+ ), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil
+}
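+
+// Illustrative chunking: with chunkSize 4 and input "Hello world" the reader
+// emits "4\r\nHell\r\n", "4\r\no wo\r\n", "3\r\nrld\r\n", and finally "0\r\n".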
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
new file mode 100644
index 000000000..148f77de2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package checksum
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.7.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
new file mode 100644
index 000000000..274d649fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
@@ -0,0 +1,175 @@
+package checksum
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// InputMiddlewareOptions provides the options for the request
+// checksum middleware setup.
+type InputMiddlewareOptions struct {
+ // GetAlgorithm is a function to get the checksum algorithm of the
+ // input payload from the input parameters.
+ //
+ // Given the input parameter value, the function must return the algorithm
+ // and true, or false if no algorithm is specified.
+ GetAlgorithm func(interface{}) (string, bool)
+
+ // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum.
+ // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored,
+ // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated
+ RequireChecksum bool
+
+ // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is
+ // set to true, checksum will be calculated and this field will be ignored, otherwise
+ // RequestChecksumCalculation will be used to indicate if checksum will be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // Enables support for wrapping the serialized input payload with a
+	// content-encoding: aws-chunked wrapper, and including a trailer for the
+ // algorithm's checksum value.
+ //
+ // The checksum will not be computed, nor added as trailing checksum, if
+ // the Algorithm's header is already set on the request.
+ EnableTrailingChecksum bool
+
+ // Enables support for computing the SHA256 checksum of input payloads
+ // along with the algorithm specified checksum. Prevents downstream
+ // middleware handlers (computePayloadSHA256) re-reading the payload.
+ //
+	// The SHA256 payload checksum will only be computed for requests
+ // that are not TLS, or do not enable trailing checksums.
+ //
+ // The SHA256 payload hash will not be computed, if the Algorithm's header
+ // is already set on the request.
+ EnableComputeSHA256PayloadHash bool
+
+ // Enables support for setting the aws-chunked decoded content length
+ // header for the decoded length of the underlying stream. Will only be set
+ // when used with trailing checksums, and aws-chunked content-encoding.
+ EnableDecodedContentLengthHeader bool
+}
+
+// AddInputMiddleware adds the middleware for performing checksum computing
+// of request payloads, and checksum validation of response payloads.
+//
+// Deprecated: This internal-only runtime API is frozen. Do not call or modify
+// it in new code. Checksum-enabled service operations now generate this
+// middleware setup code inline per #2507.
+func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) {
+ // Initial checksum configuration look up middleware
+ err = stack.Initialize.Add(&SetupInputContext{
+ GetAlgorithm: options.GetAlgorithm,
+ RequireChecksum: options.RequireChecksum,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ }, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ stack.Build.Remove("ContentChecksum")
+
+ inputChecksum := &ComputeInputPayloadChecksum{
+ EnableTrailingChecksum: options.EnableTrailingChecksum,
+ EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash,
+ EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
+ }
+ if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil {
+ return err
+ }
+
+ // If trailing checksum is not supported no need for finalize handler to be added.
+ if options.EnableTrailingChecksum {
+ trailerMiddleware := &AddInputChecksumTrailer{
+ EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum,
+ EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash,
+ EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader,
+ }
+ if err := stack.Finalize.Insert(trailerMiddleware, "Retry", middleware.After); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// RemoveInputMiddleware removes the compute input payload checksum middleware
+// handlers from the stack.
+func RemoveInputMiddleware(stack *middleware.Stack) {
+ id := (*SetupInputContext)(nil).ID()
+ stack.Initialize.Remove(id)
+
+ id = (*ComputeInputPayloadChecksum)(nil).ID()
+ stack.Finalize.Remove(id)
+}
+
+// OutputMiddlewareOptions provides options for configuring output checksum
+// validation middleware.
+type OutputMiddlewareOptions struct {
+ // GetValidationMode is a function to get the checksum validation
+ // mode of the output payload from the input parameters.
+ //
+ // Given the input parameter value, the function must return the validation
+ // mode and true, or false if no mode is specified.
+ GetValidationMode func(interface{}) (string, bool)
+
+ // SetValidationMode is a function to set the checksum validation mode of input parameters
+ SetValidationMode func(interface{}, string)
+
+ // ResponseChecksumValidation is the user config to opt-in/out response checksum validation
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+
+ // The set of checksum algorithms that should be used for response payload
+ // checksum validation. The algorithm(s) used will be a union of the
+ // output's returned algorithms and this set.
+ //
+ // Only the first algorithm in the union is currently used.
+ ValidationAlgorithms []string
+
+ // If set the middleware will ignore output multipart checksums. Otherwise
+ // a checksum format error will be returned by the middleware.
+ IgnoreMultipartValidation bool
+
+ // When set the middleware will log when output does not have checksum or
+ // algorithm to validate.
+ LogValidationSkipped bool
+
+ // When set the middleware will log when the output contains a multipart
+ // checksum that was, skipped and not validated.
+ LogMultipartValidationSkipped bool
+}
+
+// AddOutputMiddleware adds the middleware for validating response payload's
+// checksum.
+func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error {
+ err := stack.Initialize.Add(&setupOutputContext{
+ GetValidationMode: options.GetValidationMode,
+ SetValidationMode: options.SetValidationMode,
+ ResponseChecksumValidation: options.ResponseChecksumValidation,
+ }, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // Resolve a supported priority order list of algorithms to validate.
+ algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms)
+
+ m := &validateOutputPayloadChecksum{
+ Algorithms: algorithms,
+ IgnoreMultipartValidation: options.IgnoreMultipartValidation,
+ LogMultipartValidationSkipped: options.LogMultipartValidationSkipped,
+ LogValidationSkipped: options.LogValidationSkipped,
+ }
+
+ return stack.Deserialize.Add(m, middleware.After)
+}
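+
+// A minimal wiring sketch (illustrative; the accessor functions are
+// assumptions, not part of this package):
+//
+//	err := AddOutputMiddleware(stack, OutputMiddlewareOptions{
+//		GetValidationMode:    getMode, // hypothetical func(interface{}) (string, bool)
+//		SetValidationMode:    setMode, // hypothetical func(interface{}, string)
+//		ValidationAlgorithms: []string{"CRC32", "SHA1", "SHA256"},
+//		LogValidationSkipped: true,
+//	})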
+
+// RemoveOutputMiddleware removes the output payload checksum validation
+// middleware handlers from the stack.
+func RemoveOutputMiddleware(stack *middleware.Stack) {
+ id := (*setupOutputContext)(nil).ID()
+ stack.Initialize.Remove(id)
+
+ id = (*validateOutputPayloadChecksum)(nil).ID()
+ stack.Deserialize.Remove(id)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go
new file mode 100644
index 000000000..861a44293
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go
@@ -0,0 +1,90 @@
+package checksum
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+var supportedChecksumFeatures = map[Algorithm]awsmiddleware.UserAgentFeature{
+ AlgorithmCRC32: awsmiddleware.UserAgentFeatureRequestChecksumCRC32,
+ AlgorithmCRC32C: awsmiddleware.UserAgentFeatureRequestChecksumCRC32C,
+ AlgorithmSHA1: awsmiddleware.UserAgentFeatureRequestChecksumSHA1,
+ AlgorithmSHA256: awsmiddleware.UserAgentFeatureRequestChecksumSHA256,
+ AlgorithmCRC64NVME: awsmiddleware.UserAgentFeatureRequestChecksumCRC64,
+}
+
+// RequestChecksumMetricsTracking is the middleware to track operation request's checksum usage
+type RequestChecksumMetricsTracking struct {
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+ UserAgent *awsmiddleware.RequestUserAgent
+}
+
+// ID provides the middleware identifier
+func (m *RequestChecksumMetricsTracking) ID() string {
+ return "AWSChecksum:RequestMetricsTracking"
+}
+
+// HandleBuild checks the request checksum config and the checksum value sent,
+// and sends the corresponding feature ID to the user agent.
+func (m *RequestChecksumMetricsTracking) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ switch m.RequestChecksumCalculation {
+ case aws.RequestChecksumCalculationWhenSupported:
+ m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenSupported)
+ case aws.RequestChecksumCalculationWhenRequired:
+ m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenRequired)
+ }
+
+ for algo, feat := range supportedChecksumFeatures {
+ checksumHeader := AlgorithmHTTPHeader(algo)
+ if checksum := req.Header.Get(checksumHeader); checksum != "" {
+ m.UserAgent.AddUserAgentFeature(feat)
+ }
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+// ResponseChecksumMetricsTracking is the middleware to track operation response's checksum usage
+type ResponseChecksumMetricsTracking struct {
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+ UserAgent *awsmiddleware.RequestUserAgent
+}
+
+// ID provides the middleware identifier
+func (m *ResponseChecksumMetricsTracking) ID() string {
+ return "AWSChecksum:ResponseMetricsTracking"
+}
+
+// HandleBuild checks the response checksum config and sends the corresponding feature ID to the user agent.
+func (m *ResponseChecksumMetricsTracking) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ switch m.ResponseChecksumValidation {
+ case aws.ResponseChecksumValidationWhenSupported:
+ m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenSupported)
+ case aws.ResponseChecksumValidationWhenRequired:
+ m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenRequired)
+ }
+
+ return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
new file mode 100644
index 000000000..ee8ff5454
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
@@ -0,0 +1,428 @@
+package checksum
+
+import (
+ "context"
+ "crypto/sha256"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+ "strings"
+
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+ streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
+)
+
+// computedInputChecksumsKey is the metadata key for recording the algorithm the
+// checksum was computed for and the checksum value.
+type computedInputChecksumsKey struct{}
+
+// GetComputedInputChecksums returns the map of checksum algorithm to their
+// computed value stored in the middleware Metadata. Returns false if no values
+// were stored in the Metadata.
+func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) {
+ vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string)
+ return vs, ok
+}
+
+// SetComputedInputChecksums stores the map of checksum algorithm to their
+// computed value in the middleware Metadata. Overwrites any values that
+// currently exist in the metadata.
+func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) {
+ m.Set(computedInputChecksumsKey{}, vs)
+}
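+
+// Callers typically read these values back from the operation's returned
+// metadata (illustrative):
+//
+//	if sums, ok := GetComputedInputChecksums(metadata); ok {
+//		checksum := sums["SHA256"] // base64 value, keyed by algorithm name
+//		_ = checksum
+//	}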
+
+// ComputeInputPayloadChecksum middleware computes payload checksum
+type ComputeInputPayloadChecksum struct {
+ // Enables support for wrapping the serialized input payload with a
+	// content-encoding: aws-chunked wrapper, and including a trailer for the
+ // algorithm's checksum value.
+ //
+ // The checksum will not be computed, nor added as trailing checksum, if
+ // the Algorithm's header is already set on the request.
+ EnableTrailingChecksum bool
+
+ // Enables support for computing the SHA256 checksum of input payloads
+ // along with the algorithm specified checksum. Prevents downstream
+ // middleware handlers (computePayloadSHA256) re-reading the payload.
+ //
+	// The SHA256 payload hash will only be computed for requests
+ // that are not TLS, or do not enable trailing checksums.
+ //
+ // The SHA256 payload hash will not be computed, if the Algorithm's header
+ // is already set on the request.
+ EnableComputePayloadHash bool
+
+ // Enables support for setting the aws-chunked decoded content length
+ // header for the decoded length of the underlying stream. Will only be set
+ // when used with trailing checksums, and aws-chunked content-encoding.
+ EnableDecodedContentLengthHeader bool
+
+ useTrailer bool
+}
+
+type useTrailer struct{}
+
+// ID provides the middleware's identifier.
+func (m *ComputeInputPayloadChecksum) ID() string {
+ return "AWSChecksum:ComputeInputPayloadChecksum"
+}
+
+type computeInputHeaderChecksumError struct {
+ Msg string
+ Err error
+}
+
+func (e computeInputHeaderChecksumError) Error() string {
+ const intro = "compute input header checksum failed"
+
+ if e.Err != nil {
+ return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+ }
+
+ return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err }
+
+// HandleFinalize handles computing the payload's checksum in the following cases:
+// - Is HTTP, not HTTPS
+// - RequireChecksum is true, and no checksums were specified via the Input
+// - Trailing checksums are not supported
+//
+// The build handler must be inserted in the stack before ContentPayloadHash
+// and after ComputeContentLength.
+func (m *ComputeInputPayloadChecksum) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ var checksum string
+ algorithm, ok, err := getInputAlgorithm(ctx)
+ if err != nil {
+ return out, metadata, err
+ }
+ if !ok {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, computeInputHeaderChecksumError{
+ Msg: fmt.Sprintf("unknown request type %T", req),
+ }
+ }
+
+ defer func() {
+ if algorithm == "" || checksum == "" || err != nil {
+ return
+ }
+
+ // Record the checksum and algorithm that was computed
+ SetComputedInputChecksums(&metadata, map[string]string{
+ string(algorithm): checksum,
+ })
+ }()
+
+ // If any checksum header is already set nothing to do.
+ for header := range req.Header {
+ h := strings.ToUpper(header)
+ if strings.HasPrefix(h, "X-AMZ-CHECKSUM-") {
+ algorithm = Algorithm(strings.TrimPrefix(h, "X-AMZ-CHECKSUM-"))
+ checksum = req.Header.Get(header)
+ return next.HandleFinalize(ctx, in)
+ }
+ }
+
+ computePayloadHash := m.EnableComputePayloadHash
+ if v := v4.GetPayloadHash(ctx); v != "" {
+ computePayloadHash = false
+ }
+
+ stream := req.GetStream()
+ streamLength, err := getRequestStreamLength(req)
+ if err != nil {
+ return out, metadata, computeInputHeaderChecksumError{
+ Msg: "failed to determine stream length",
+ Err: err,
+ }
+ }
+
+	// If trailing checksums are supported, the request is HTTPS, and the
+	// stream is not nil or empty, switch to a trailing checksum instead.
+	//
+	// Nil and empty streams will always be handled as a request header,
+	// regardless of whether the operation supports trailing checksums.
+ if req.IsHTTPS() && !presignedurlcust.GetIsPresigning(ctx) {
+ if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
+ if m.EnableComputePayloadHash {
+ // ContentSHA256Header middleware handles the header
+ ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
+ }
+ m.useTrailer = true
+ ctx = middleware.WithStackValue(ctx, useTrailer{}, true)
+ return next.HandleFinalize(ctx, in)
+ }
+
+		// If trailing checksums are not enabled but the protocol is still
+		// HTTPS, disable computing the payload hash. The downstream middleware
+		// handler (ComputePayloadHash) will set the payload hash to unsigned
+		// payload, if signing was used.
+ computePayloadHash = false
+ }
+
+ // Only seekable streams are supported for non-trailing checksums, because
+ // the stream needs to be rewound before the handler can continue.
+ if stream != nil && !req.IsStreamSeekable() {
+ return out, metadata, computeInputHeaderChecksumError{
+ Msg: "unseekable stream is not supported without TLS and trailing checksum",
+ }
+ }
+
+ var sha256Checksum string
+ checksum, sha256Checksum, err = computeStreamChecksum(
+ algorithm, stream, computePayloadHash)
+ if err != nil {
+ return out, metadata, computeInputHeaderChecksumError{
+ Msg: "failed to compute stream checksum",
+ Err: err,
+ }
+ }
+
+ if err := req.RewindStream(); err != nil {
+ return out, metadata, computeInputHeaderChecksumError{
+ Msg: "failed to rewind stream",
+ Err: err,
+ }
+ }
+
+ checksumHeader := AlgorithmHTTPHeader(algorithm)
+ req.Header.Set(checksumHeader, checksum)
+
+ if computePayloadHash {
+ ctx = v4.SetPayloadHash(ctx, sha256Checksum)
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
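+
+// Decision summary (descriptive note, not upstream prose): HTTPS with a
+// non-empty stream and trailing support defers to AddInputChecksumTrailer;
+// every other case computes the checksum here from a seekable stream, rewinds
+// it, and sets the x-amz-checksum-* header directly.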
+
+type computeInputTrailingChecksumError struct {
+ Msg string
+ Err error
+}
+
+func (e computeInputTrailingChecksumError) Error() string {
+ const intro = "compute input trailing checksum failed"
+
+ if e.Err != nil {
+ return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
+ }
+
+ return fmt.Sprintf("%s, %s", intro, e.Msg)
+}
+func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
+
+// AddInputChecksumTrailer adds the HTTP checksum as a trailer when:
+// - Is HTTPS, not HTTP
+// - A checksum was specified via the Input
+// - Trailing checksums are supported.
+type AddInputChecksumTrailer struct {
+ EnableTrailingChecksum bool
+ EnableComputePayloadHash bool
+ EnableDecodedContentLengthHeader bool
+}
+
+// ID identifies this middleware.
+func (*AddInputChecksumTrailer) ID() string {
+ return "addInputChecksumTrailer"
+}
+
+// HandleFinalize wraps the request body to write the trailing checksum.
+func (m *AddInputChecksumTrailer) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ algorithm, ok, err := getInputAlgorithm(ctx)
+ if err != nil {
+ return out, metadata, computeInputTrailingChecksumError{
+ Msg: "failed to get algorithm",
+ Err: err,
+ }
+ } else if !ok {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled {
+ return next.HandleFinalize(ctx, in)
+ }
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, computeInputTrailingChecksumError{
+ Msg: fmt.Sprintf("unknown request type %T", req),
+ }
+ }
+
+ // Trailing checksums are only supported when TLS is enabled.
+ if !req.IsHTTPS() {
+ return out, metadata, computeInputTrailingChecksumError{
+ Msg: "HTTPS required",
+ }
+ }
+
+ // If any checksum header is already set nothing to do.
+ for header := range req.Header {
+ if strings.HasPrefix(strings.ToLower(header), "x-amz-checksum-") {
+ return next.HandleFinalize(ctx, in)
+ }
+ }
+
+ stream := req.GetStream()
+ streamLength, err := getRequestStreamLength(req)
+ if err != nil {
+ return out, metadata, computeInputTrailingChecksumError{
+ Msg: "failed to determine stream length",
+ Err: err,
+ }
+ }
+
+ if stream == nil || streamLength == 0 {
+ // Nil and empty streams are handled by the Build handler. They are not
+ // supported by the trailing checksums finalize handler. There is no
+ // benefit to sending them as trailers compared to headers.
+ return out, metadata, computeInputTrailingChecksumError{
+ Msg: "nil or empty streams are not supported",
+ }
+ }
+
+ checksumReader, err := newComputeChecksumReader(stream, algorithm)
+ if err != nil {
+ return out, metadata, computeInputTrailingChecksumError{
+			Msg: "failed to create checksum reader",
+ Err: err,
+ }
+ }
+
+ awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
+ func(o *awsChunkedEncodingOptions) {
+ o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
+ Get: checksumReader.Base64Checksum,
+ Length: checksumReader.Base64ChecksumLength(),
+ }
+ o.StreamLength = streamLength
+ })
+
+ for key, values := range awsChunkedReader.HTTPHeaders() {
+ for _, value := range values {
+ req.Header.Add(key, value)
+ }
+ }
+
+ // Setting the stream on the request will create a copy. The content length
+ // is not updated until after the request is copied to prevent impacting
+ // upstream middleware.
+ req, err = req.SetStream(awsChunkedReader)
+ if err != nil {
+ return out, metadata, computeInputTrailingChecksumError{
+ Msg: "failed updating request to trailing checksum wrapped stream",
+ Err: err,
+ }
+ }
+ req.ContentLength = awsChunkedReader.EncodedLength()
+ in.Request = req
+
+ // Add decoded content length header if original stream's content length is known.
+ if streamLength != -1 && m.EnableDecodedContentLengthHeader {
+ req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
+ }
+
+ out, metadata, err = next.HandleFinalize(ctx, in)
+ if err == nil {
+ checksum, err := checksumReader.Base64Checksum()
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
+ }
+
+ // Record the checksum and algorithm that was computed
+ SetComputedInputChecksums(&metadata, map[string]string{
+ string(algorithm): checksum,
+ })
+ }
+
+ return out, metadata, err
+}
+
+func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
+ ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx)
+ if ctxAlgorithm == "" {
+ return "", false, nil
+ }
+
+ algorithm, err := ParseAlgorithm(ctxAlgorithm)
+ if err != nil {
+ return "", false, fmt.Errorf(
+ "failed to parse algorithm, %w", err)
+ }
+
+ return algorithm, true, nil
+}
+
+func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
+ checksum string, sha256Checksum string, err error,
+) {
+ hasher, err := NewAlgorithmHash(algorithm)
+ if err != nil {
+ return "", "", fmt.Errorf(
+ "failed to get hasher for checksum algorithm, %w", err)
+ }
+
+ var sha256Hasher hash.Hash
+ var batchHasher io.Writer = hasher
+
+ // Compute payload hash for the protocol. To prevent another handler
+ // (computePayloadSHA256) re-reading body also compute the SHA256 for
+ // request signing. If configured checksum algorithm is SHA256, don't
+ // double wrap stream with another SHA256 hasher.
+ if computePayloadHash && algorithm != AlgorithmSHA256 {
+ sha256Hasher = sha256.New()
+ batchHasher = io.MultiWriter(hasher, sha256Hasher)
+ }
+
+ if stream != nil {
+ if _, err = io.Copy(batchHasher, stream); err != nil {
+ return "", "", fmt.Errorf(
+ "failed to read stream to compute hash, %w", err)
+ }
+ }
+
+ checksum = string(base64EncodeHashSum(hasher))
+ if computePayloadHash {
+ if algorithm != AlgorithmSHA256 {
+ sha256Checksum = string(hexEncodeHashSum(sha256Hasher))
+ } else {
+ sha256Checksum = string(hexEncodeHashSum(hasher))
+ }
+ }
+
+ return checksum, sha256Checksum, nil
+}
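+
+// Worked example (digest taken from the aws-chunked doc above): for payload
+// "Hello world" and AlgorithmSHA256, checksum is the base64 digest
+// "ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=", and sha256Checksum is the
+// same digest hex encoded for request signing.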
+
+func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
+ if v := req.ContentLength; v > 0 {
+ return v, nil
+ }
+
+ if length, ok, err := req.StreamLength(); err != nil {
+ return 0, fmt.Errorf("failed getting request stream's length, %w", err)
+ } else if ok {
+ return length, nil
+ }
+
+ return -1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
new file mode 100644
index 000000000..3347e88cc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go
@@ -0,0 +1,122 @@
+package checksum
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+const (
+ checksumValidationModeEnabled = "ENABLED"
+)
+
+// SetupInputContext is the initial middleware that looks up the input
+// used to configure checksum behavior. This middleware must be executed before
+// the input validation step or any other checksum middleware.
+type SetupInputContext struct {
+ // GetAlgorithm is a function to get the checksum algorithm of the
+ // input payload from the input parameters.
+ //
+ // Given the input parameter value, the function must return the algorithm
+ // and true, or false if no algorithm is specified.
+ GetAlgorithm func(interface{}) (string, bool)
+
+ // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum.
+ // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored,
+ // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated
+ RequireChecksum bool
+
+ // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is
+ // set to true, checksum will be calculated and this field will be ignored, otherwise
+ // RequestChecksumCalculation will be used to indicate if checksum will be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+}
+
+// ID for the middleware
+func (m *SetupInputContext) ID() string {
+ return "AWSChecksum:SetupInputContext"
+}
+
+// HandleInitialize is the initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *SetupInputContext) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// The nil check here is for operations that require a checksum but do not
+	// have an input algorithm setting.
+ if m.GetAlgorithm != nil {
+ if algorithm, ok := m.GetAlgorithm(in.Parameters); ok {
+ ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm)
+ return next.HandleInitialize(ctx, in)
+ }
+ }
+
+ if m.RequireChecksum || m.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenSupported {
+ ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32))
+ }
+
+ return next.HandleInitialize(ctx, in)
+}
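+
+// Precedence note (descriptive): an algorithm set on the input always wins;
+// otherwise CRC32 is defaulted only when the operation requires a checksum or
+// the user opted into RequestChecksumCalculationWhenSupported.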
+
+type setupOutputContext struct {
+ // GetValidationMode is a function to get the checksum validation
+ // mode of the output payload from the input parameters.
+ //
+ // Given the input parameter value, the function must return the validation
+ // mode and true, or false if no mode is specified.
+ GetValidationMode func(interface{}) (string, bool)
+
+ // SetValidationMode is a function to set the checksum validation mode of input parameters
+ SetValidationMode func(interface{}, string)
+
+ // ResponseChecksumValidation states user config to opt-in/out checksum validation
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+}
+
+// ID for the middleware
+func (m *setupOutputContext) ID() string {
+ return "AWSChecksum:SetupOutputContext"
+}
+
+// HandleInitialize is the initialization middleware that sets up the checksum
+// context based on the input parameters provided in the stack.
+func (m *setupOutputContext) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+
+ mode, _ := m.GetValidationMode(in.Parameters)
+
+ if m.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported || mode == checksumValidationModeEnabled {
+ m.SetValidationMode(in.Parameters, checksumValidationModeEnabled)
+ ctx = setContextOutputValidationMode(ctx, checksumValidationModeEnabled)
+ }
+
+ return next.HandleInitialize(ctx, in)
+}
+
+// outputValidationModeKey is the key set on context used to identify if
+// output checksum validation is enabled.
+type outputValidationModeKey struct{}
+
+// setContextOutputValidationMode sets the request checksum
+// algorithm on the context.
+//
+// Scoped to stack values.
+func setContextOutputValidationMode(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, outputValidationModeKey{}, value)
+}
+
+// getContextOutputValidationMode returns response checksum validation state,
+// if one was specified. Empty string is returned if one is not specified.
+//
+// Scoped to stack values.
+func getContextOutputValidationMode(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
new file mode 100644
index 000000000..6b56f8c6a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go
@@ -0,0 +1,129 @@
+package checksum
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// outputValidationAlgorithmsUsedKey is the metadata key for indexing the algorithms
+// that were used by the middleware's validation.
+type outputValidationAlgorithmsUsedKey struct{}
+
+// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used
+// stored in the middleware Metadata. Returns false if no algorithms were
+// stored in the Metadata.
+func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
+ vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
+ return vs, ok
+}
+
+// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
+// middleware Metadata.
+func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) {
+ m.Set(outputValidationAlgorithmsUsedKey{}, vs)
+}
+
+// validateOutputPayloadChecksum middleware computes the payload checksum of
+// the received response and validates it against the checksum returned by the
+// service.
+type validateOutputPayloadChecksum struct {
+ // Algorithms represents a priority-ordered list of valid checksum
+	// algorithms that should be validated when present in HTTP response
+ // headers.
+ Algorithms []Algorithm
+
+ // IgnoreMultipartValidation indicates multipart checksums ending with "-#"
+ // will be ignored.
+ IgnoreMultipartValidation bool
+
+ // When set the middleware will log when output does not have checksum or
+ // algorithm to validate.
+ LogValidationSkipped bool
+
+ // When set the middleware will log when the output contains a multipart
+ // checksum that was, skipped and not validated.
+ LogMultipartValidationSkipped bool
+}
+
+func (m *validateOutputPayloadChecksum) ID() string {
+ return "AWSChecksum:ValidateOutputPayloadChecksum"
+}
+
+// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
+// body with an io.ReadCloser that will validate its checksum.
+func (m *validateOutputPayloadChecksum) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ if mode := getContextOutputValidationMode(ctx); mode != checksumValidationModeEnabled {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
+ }
+ }
+
+ var expectedChecksum string
+ var algorithmToUse Algorithm
+ for _, algorithm := range m.Algorithms {
+ value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
+ if len(value) == 0 {
+ continue
+ }
+
+ expectedChecksum = value
+ algorithmToUse = algorithm
+ }
+
+ logger := middleware.GetLogger(ctx)
+
+ // Skip validation if no checksum algorithm or checksum is available.
+ if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
+ if response.StatusCode != 404 && response.Body != http.NoBody && m.LogValidationSkipped {
+ // TODO this probably should have more information about the
+ // operation output that won't be validated.
+ logger.Logf(logging.Warn,
+ "Response has no supported checksum. Not validating response payload.")
+ }
+ return out, metadata, nil
+ }
+
+ // Ignore multipart validation
+ if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
+ if m.LogMultipartValidationSkipped {
+ // TODO this probably should have more information about the
+ // operation output that won't be validated.
+ logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
+ }
+ return out, metadata, nil
+ }
+
+ body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
+ }
+ response.Body = body
+
+ // Update the metadata to include the set of the checksum algorithms that
+ // will be validated.
+ SetOutputValidationAlgorithmsUsed(&metadata, []string{
+ string(algorithmToUse),
+ })
+
+ return out, metadata, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
new file mode 100644
index 000000000..2021865dd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -0,0 +1,500 @@
+# v1.13.13 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.13.12 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.12.9 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.18 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.16 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.11 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.6 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2024-03-05)
+
+* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility.
+
+# v1.11.3 (2024-03-04)
+
+* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API.
+
+# v1.11.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
new file mode 100644
index 000000000..5d5286f92
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
@@ -0,0 +1,56 @@
+package presignedurl
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// WithIsPresigning adds the isPresigning sentinel value to a context to signal
+// that the middleware stack is using the presign flow.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func WithIsPresigning(ctx context.Context) context.Context {
+ return middleware.WithStackValue(ctx, isPresigningKey{}, true)
+}
+
+// GetIsPresigning reports whether the context contains the isPresigning
+// sentinel value for presigning flows.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetIsPresigning(ctx context.Context) bool {
+ v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool)
+ return v
+}
+
+type isPresigningKey struct{}
+
+// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that
+// will update the stack's context to be flagged as being invoked for the
+// purpose of presigning.
+func AddAsIsPresigningMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before)
+}
+
+// AddAsIsPresigingMiddleware is an alias for backwards compatibility.
+//
+// Deprecated: This API was released with a typo. Use
+// [AddAsIsPresigningMiddleware] instead.
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+ return AddAsIsPresigningMiddleware(stack)
+}
+
+type asIsPresigningMiddleware struct{}
+
+func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" }
+
+func (asIsPresigningMiddleware) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = WithIsPresigning(ctx)
+ return next.HandleInitialize(ctx, in)
+}
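
A minimal sketch (not part of the vendored diff) of how the sentinel above round-trips. The package sits under an `internal/` path, so it cannot be imported from outside the aws-sdk-go-v2 module; the helpers are re-declared locally here purely for illustration.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// Local re-declarations mirroring the vendored helpers, since internal/
// packages are not importable from outside the SDK module.
type isPresigningKey struct{}

func withIsPresigning(ctx context.Context) context.Context {
	return middleware.WithStackValue(ctx, isPresigningKey{}, true)
}

func getIsPresigning(ctx context.Context) bool {
	v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool)
	return v
}

func main() {
	ctx := context.Background()
	fmt.Println(getIsPresigning(ctx)) // false: sentinel not set

	// AddAsIsPresigningMiddleware performs this step once per request.
	ctx = withIsPresigning(ctx)
	fmt.Println(getIsPresigning(ctx)) // true: presign flow flagged
}
```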
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
new file mode 100644
index 000000000..1b85375cf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
@@ -0,0 +1,3 @@
+// Package presignedurl provides the customizations for API clients to fill
+// presigned URLs into input parameters.
+package presignedurl
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
new file mode 100644
index 000000000..9d29218c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package presignedurl
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
new file mode 100644
index 000000000..1e2f5c812
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+ "context"
+ "fmt"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters into a
+// presigned URL.
+type URLPresigner interface {
+ // PresignURL presigns a URL.
+ PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides a collection of accessors for retrieving and
+// setting the values needed for presigned URL generation.
+type ParameterAccessor struct {
+ // GetPresignedURL accessor points to a function that retrieves a presigned url if present
+ GetPresignedURL func(interface{}) (string, bool, error)
+
+ // GetSourceRegion accessor points to a function that retrieves source region for presigned url
+ GetSourceRegion func(interface{}) (string, bool, error)
+
+ // CopyInput accessor points to a function that takes in an input, and returns a copy.
+ CopyInput func(interface{}) (interface{}, error)
+
+ // SetDestinationRegion accessor points to a function that sets destination region on api input struct
+ SetDestinationRegion func(interface{}, string) error
+
+ // SetPresignedURL accessor points to a function that sets presigned url on api input struct
+ SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+	// Accessor holds the parameter accessors used by this middleware
+ Accessor ParameterAccessor
+
+ // Presigner is the URLPresigner used by the middleware
+ Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Initialize.Remove((*presign)(nil).ID())
+ return err
+}
+
+type presign struct {
+ options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
+
+func (m *presign) HandleInitialize(
+ ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// If the presigned URL is already set, skip this middleware.
+ if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ } else if ok {
+ return next.HandleInitialize(ctx, input)
+ }
+
+	// If the source region is not set, skip this middleware.
+ srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ } else if !ok || len(srcRegion) == 0 {
+ return next.HandleInitialize(ctx, input)
+ }
+
+ // Create a copy of the original input so the destination region value can
+ // be added. This ensures that value does not leak into the original
+ // request parameters.
+ paramCpy, err := m.options.Accessor.CopyInput(input.Parameters)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+ }
+
+ // Destination region is the API client's configured region.
+ dstRegion := awsmiddleware.GetRegion(ctx)
+ if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ }
+
+ presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy)
+ if err != nil {
+ return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err)
+ }
+
+ // Update the original input with the presigned URL value.
+ if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil {
+ return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+ }
+
+ return next.HandleInitialize(ctx, input)
+}
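
A sketch of how a caller might wire this middleware, written in-package style because the module is internal. The `copySnapshotInput` type, its fields, and the `presigner` argument are assumptions for illustration; in the real SDK the accessors are code-generated for the operations that need cross-region presigning (EC2 CopySnapshot, for example).

```go
package presignedurl // sketch only; this internal package is not importable

import "github.com/aws/smithy-go/middleware"

// copySnapshotInput is a hypothetical operation input; real inputs are
// code-generated and carry equivalent members.
type copySnapshotInput struct {
	SourceRegion      *string
	DestinationRegion *string
	PresignedUrl      *string
}

func addCopySnapshotPresign(stack *middleware.Stack, presigner URLPresigner) error {
	return AddMiddleware(stack, Options{
		Accessor: ParameterAccessor{
			GetPresignedURL: func(p interface{}) (string, bool, error) {
				in := p.(*copySnapshotInput)
				if in.PresignedUrl == nil {
					return "", false, nil
				}
				return *in.PresignedUrl, true, nil
			},
			GetSourceRegion: func(p interface{}) (string, bool, error) {
				in := p.(*copySnapshotInput)
				if in.SourceRegion == nil {
					return "", false, nil
				}
				return *in.SourceRegion, true, nil
			},
			CopyInput: func(p interface{}) (interface{}, error) {
				cpy := *p.(*copySnapshotInput)
				return &cpy, nil
			},
			SetDestinationRegion: func(p interface{}, v string) error {
				p.(*copySnapshotInput).DestinationRegion = &v
				return nil
			},
			SetPresignedURL: func(p interface{}, v string) error {
				p.(*copySnapshotInput).PresignedUrl = &v
				return nil
			},
		},
		Presigner: presigner,
	})
}
```

The copy made by `CopyInput` is what keeps the injected destination region from leaking back into the caller's original request parameters.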
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
new file mode 100644
index 000000000..ff2188db8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
@@ -0,0 +1,429 @@
+# v1.18.15 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.18.9 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.18 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.16 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.3 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.1 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-03-21)
+
+* **Feature**: Port the v1 SDK's 100-continue HTTP header customization for S3 PutObject/UploadPart requests and enable user config
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.2 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-09-02)
+
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+
+# v1.6.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-06-04)
+
+* **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+
+# v1.3.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 000000000..ec290b213
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,53 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 access point ARN.
+type AccessPointARN struct {
+ arn.ARN
+ AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+ return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+ if isFIPS(a.Region) {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+ }
+ if len(a.AccountID) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+ }
+ if len(resParts) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+ }
+
+ resID := resParts[0]
+ if len(strings.TrimSpace(resID)) == 0 {
+ return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+ }
+
+ return AccessPointARN{
+ ARN: a,
+ AccessPointName: resID,
+ }, nil
+}
+
+func isFIPS(region string) bool {
+ return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips")
+}
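
A minimal sketch of the happy path, in-package style; the ARN literal is only an example.

```go
package arn // sketch only; this internal package is not importable

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/arn"
)

func sketchParseAccessPoint() {
	a := arn.ARN{
		Partition: "aws",
		Service:   "s3",
		Region:    "us-west-2",
		AccountID: "012345678901",
		Resource:  "accesspoint/myaccesspoint",
	}
	// The caller strips the leading "accesspoint" token before this call.
	apARN, err := ParseAccessPointResource(a, []string{"myaccesspoint"})
	if err != nil {
		return // rejected for, e.g., a FIPS region or a missing account ID
	}
	fmt.Println(apARN.AccessPointName) // myaccesspoint
}
```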
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go
new file mode 100644
index 000000000..06e1a3add
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn.go
@@ -0,0 +1,85 @@
+package arn
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+var supportedServiceARN = []string{
+ "s3",
+ "s3-outposts",
+ "s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+ for _, name := range supportedServiceARN {
+ if name == service {
+ return true
+ }
+ }
+ return false
+}
+
+// Resource provides the interface abstracting ARNs of specific resource
+// types.
+type Resource interface {
+ GetARN() arn.ARN
+ String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
+func ParseResource(a arn.ARN, resParser ResourceParser) (resARN Resource, err error) {
+ if len(a.Partition) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
+ }
+
+ if !isSupportedServiceARN(a.Service) {
+ return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
+ }
+
+ if len(a.Resource) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
+ }
+
+ return resParser(a)
+}
+
+// SplitResource splits the resource components by the ARN resource delimiters.
+func SplitResource(v string) []string {
+ var parts []string
+ var offset int
+
+ for offset <= len(v) {
+ idx := strings.IndexAny(v[offset:], "/:")
+ if idx < 0 {
+ parts = append(parts, v[offset:])
+ break
+ }
+ parts = append(parts, v[offset:idx+offset])
+ offset += idx + 1
+ }
+
+ return parts
+}
+
+// IsARN returns whether the given string is an ARN
+func IsARN(s string) bool {
+ return arn.IsARN(s)
+}
+
+// InvalidARNError provides the error for an invalid ARN error.
+type InvalidARNError struct {
+ ARN arn.ARN
+ Reason string
+}
+
+// Error returns a string describing the InvalidARNError
+func (e InvalidARNError) Error() string {
+ return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
+}
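
A sketch of the full parsing pipeline these pieces form: `arn.Parse` produces the raw ARN, `SplitResource` tokenizes the resource component, and a `ResourceParser` closure maps it to a typed resource. The closure below is a simplified stand-in for the per-service parsers in the SDK.

```go
package arn // sketch only; this internal package is not importable

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/arn"
)

func sketchParseResource() {
	a, err := arn.Parse("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint")
	if err != nil {
		return
	}
	res, err := ParseResource(a, func(a arn.ARN) (Resource, error) {
		parts := SplitResource(a.Resource) // ["accesspoint", "myaccesspoint"]
		if len(parts) == 0 || parts[0] != "accesspoint" {
			return nil, InvalidARNError{ARN: a, Reason: "unsupported resource type"}
		}
		apARN, err := ParseAccessPointResource(a, parts[1:])
		if err != nil {
			return nil, err
		}
		return apARN, nil
	})
	if err != nil {
		return
	}
	fmt.Println(res.GetARN().Service) // s3
}
```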
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go
new file mode 100644
index 000000000..9a3258e15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go
@@ -0,0 +1,32 @@
+package arn
+
+import "fmt"
+
+// arnable is implemented by the relevant S3/S3Control
+// operations which have members that may need ARN
+// processing.
+type arnable interface {
+ SetARNMember(string) error
+ GetARNMember() (*string, bool)
+}
+
+// GetARNField would be called during middleware execution
+// to retrieve a member value that is an ARN in need of
+// processing.
+func GetARNField(input interface{}) (*string, bool) {
+ v, ok := input.(arnable)
+ if !ok {
+ return nil, false
+ }
+ return v.GetARNMember()
+}
+
+// SetARNField would be called during middleware execution
+// to set a member value that requires ARN processing.
+func SetARNField(input interface{}, v string) error {
+ params, ok := input.(arnable)
+ if !ok {
+ return fmt.Errorf("Params does not contain arn field member")
+ }
+ return params.SetARNMember(v)
+}
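
A sketch of a type satisfying the (unexported) `arnable` contract, in-package style; `copyInput` and its `Bucket` member are hypothetical, standing in for the generated S3/S3Control inputs.

```go
package arn // sketch only; arnable is unexported, so this must live in-package

// copyInput is a hypothetical operation input whose Bucket member may
// carry an ARN that needs processing.
type copyInput struct{ Bucket *string }

func (in *copyInput) SetARNMember(v string) error {
	in.Bucket = &v
	return nil
}

func (in *copyInput) GetARNMember() (*string, bool) {
	if in.Bucket == nil {
		return nil, false
	}
	return in.Bucket, true
}

// rewriteARNMember shows the two accessors used together from middleware.
func rewriteARNMember(input interface{}, v string) error {
	if cur, ok := GetARNField(input); ok && cur != nil {
		return SetARNField(input, v) // replace the ARN-bearing member
	}
	return nil
}
```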
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
new file mode 100644
index 000000000..e06a30285
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/outpost_arn.go
@@ -0,0 +1,128 @@
+package arn
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// OutpostARN is the interface satisfied by outpost ARNs
+type OutpostARN interface {
+ Resource
+ GetOutpostID() string
+}
+
+// ParseOutpostARNResource will parse a provided ARN's resource using the appropriate ARN format
+// and return a specific OutpostARN type
+//
+// Currently supported outpost ARN formats:
+// * Outpost AccessPoint ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
+//
+// * Outpost Bucket ARN format:
+// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
+// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
+//
+// Other outpost ARN formats may be supported and added in the future.
+func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
+ if len(a.Region) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "region not set"}
+ }
+
+ if isFIPS(a.Region) {
+ return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}
+ }
+
+ if len(a.AccountID) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
+ }
+
+ // verify if outpost id is present and valid
+ if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
+ return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ // verify possible resource type exists
+ if len(resParts) < 3 {
+ return nil, InvalidARNError{
+ ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
+ }
+ }
+
+	// Since we know this is an outpost ARN, fetch the outpost ID
+ outpostID := strings.TrimSpace(resParts[0])
+
+ switch resParts[1] {
+ case "accesspoint":
+ accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
+ if err != nil {
+ return OutpostAccessPointARN{}, err
+ }
+ return OutpostAccessPointARN{
+ AccessPointARN: accesspointARN,
+ OutpostID: outpostID,
+ }, nil
+
+ case "bucket":
+ bucketName, err := parseBucketResource(a, resParts[2:])
+ if err != nil {
+ return nil, err
+ }
+ return OutpostBucketARN{
+ ARN: a,
+ BucketName: bucketName,
+ OutpostID: outpostID,
+ }, nil
+
+ default:
+ return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
+ }
+}
+
+// OutpostAccessPointARN represents outpost access point ARN.
+type OutpostAccessPointARN struct {
+ AccessPointARN
+ OutpostID string
+}
+
+// GetOutpostID returns the outpost ID of the outpost access point ARN
+func (o OutpostAccessPointARN) GetOutpostID() string {
+ return o.OutpostID
+}
+
+// OutpostBucketARN represents the outpost bucket ARN.
+type OutpostBucketARN struct {
+ arn.ARN
+ BucketName string
+ OutpostID string
+}
+
+// GetOutpostID returns the outpost ID of the outpost bucket ARN
+func (o OutpostBucketARN) GetOutpostID() string {
+ return o.OutpostID
+}
+
+// GetARN retrieves the base ARN from the outpost bucket ARN resource
+func (o OutpostBucketARN) GetARN() arn.ARN {
+ return o.ARN
+}
+
+// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
+// bucket resource id.
+//
+// parseBucketResource only parses the bucket resource id.
+func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
+ if len(resParts) == 0 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+ }
+ if len(resParts) > 1 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+ }
+
+ bucketName = strings.TrimSpace(resParts[0])
+ if len(bucketName) == 0 {
+ return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
+ }
+ return bucketName, err
+}
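
A sketch of parsing an outpost access point ARN end to end; the ARN literal matches the example in the doc comment above.

```go
package arn // sketch only; this internal package is not importable

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/arn"
)

func sketchParseOutpost() {
	a, err := arn.Parse("arn:aws:s3-outposts:us-west-2:012345678901:" +
		"outpost/op-1234567890123456/accesspoint/myaccesspoint")
	if err != nil {
		return
	}
	parts := SplitResource(a.Resource)
	// parts: ["outpost", "op-1234567890123456", "accesspoint", "myaccesspoint"]
	if len(parts) == 0 || parts[0] != "outpost" {
		return // not an outpost resource
	}
	o, err := ParseOutpostARNResource(a, parts[1:])
	if err != nil {
		return
	}
	fmt.Println(o.GetOutpostID()) // op-1234567890123456
}
```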
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
new file mode 100644
index 000000000..513154cc0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/s3_object_lambda_arn.go
@@ -0,0 +1,15 @@
+package arn
+
+// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service
+type S3ObjectLambdaARN interface {
+ Resource
+
+ isS3ObjectLambdasARN()
+}
+
+// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type
+type S3ObjectLambdaAccessPointARN struct {
+ AccessPointARN
+}
+
+func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
new file mode 100644
index 000000000..b51532085
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn_lookup.go
@@ -0,0 +1,73 @@
+package s3shared
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+
+ "github.com/aws/aws-sdk-go-v2/aws/arn"
+)
+
+// ARNLookup is the initial middleware that looks up whether an ARN is provided.
+// This middleware is responsible for fetching the ARN from an arnable field, and registering the ARN on
+// the middleware context. It must be executed before the input validation step or any other
+// ARN processing middleware.
+type ARNLookup struct {
+
+	// GetARNValue takes in an input interface and returns a string pointer and a bool
+ GetARNValue func(interface{}) (*string, bool)
+}
+
+// ID for the middleware
+func (m *ARNLookup) ID() string {
+ return "S3Shared:ARNLookup"
+}
+
+// HandleInitialize handles the behavior of this initialize step
+func (m *ARNLookup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ // check if GetARNValue is supported
+ if m.GetARNValue == nil {
+ return next.HandleInitialize(ctx, in)
+ }
+
+	// check if the input resource is an ARN; if not, go to next
+ v, ok := m.GetARNValue(in.Parameters)
+ if !ok || v == nil || !arn.IsARN(*v) {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ // if ARN process ResourceRequest and put it on ctx
+ av, err := arn.Parse(*v)
+ if err != nil {
+ return out, metadata, fmt.Errorf("error parsing arn: %w", err)
+ }
+ // set parsed arn on context
+ ctx = setARNResourceOnContext(ctx, av)
+
+ return next.HandleInitialize(ctx, in)
+}
+
+// arnResourceKey is the key set on context used to identify and retrieve an ARN resource
+// if present on the context.
+type arnResourceKey struct{}
+
+// setARNResourceOnContext sets the S3 ARN on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setARNResourceOnContext(ctx context.Context, value arn.ARN) context.Context {
+ return middleware.WithStackValue(ctx, arnResourceKey{}, value)
+}
+
+// GetARNResourceFromContext returns an ARN from context and a bool indicating
+// presence of ARN on ctx.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetARNResourceFromContext(ctx context.Context) (arn.ARN, bool) {
+ v, ok := middleware.GetStackValue(ctx, arnResourceKey{}).(arn.ARN)
+ return v, ok
+}
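A hedged sketch of what a GetARNValue hook can look like in practice: the getObjectInput type is hypothetical, but the closure shape matches the GetARNValue field above, and arn.IsARN and arn.Parse are the same public helpers the middleware itself uses.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws/arn"
)

// getObjectInput is a hypothetical operation input with an ARN-able field.
type getObjectInput struct{ Bucket *string }

// getARNValue has the shape ARNLookup.GetARNValue expects: extract the
// candidate field from an untyped input, reporting whether it was present.
func getARNValue(in interface{}) (*string, bool) {
	v, ok := in.(*getObjectInput)
	if !ok || v.Bucket == nil {
		return nil, false
	}
	return v.Bucket, true
}

func main() {
	b := "arn:aws:s3:us-west-2:123456789012:accesspoint/myaccesspoint"
	if v, ok := getARNValue(&getObjectInput{Bucket: &b}); ok && arn.IsARN(*v) {
		parsed, _ := arn.Parse(*v)
		fmt.Println(parsed.Service, parsed.Resource) // s3 accesspoint/myaccesspoint
	}
}
```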
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
new file mode 100644
index 000000000..b5d31f5c5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go
@@ -0,0 +1,41 @@
+package config
+
+import "context"
+
+// UseARNRegionProvider is an interface for retrieving external configuration value for UseARNRegion
+type UseARNRegionProvider interface {
+ GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error)
+}
+
+// DisableMultiRegionAccessPointsProvider is an interface for retrieving external configuration value for DisableMultiRegionAccessPoints
+type DisableMultiRegionAccessPointsProvider interface {
+ GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value bool, found bool, err error)
+}
+
+// ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(UseARNRegionProvider); ok {
+ value, found, err = p.GetS3UseARNRegion(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ResolveDisableMultiRegionAccessPoints extracts the first instance of a DisableMultiRegionAccessPoints from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveDisableMultiRegionAccessPoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(DisableMultiRegionAccessPointsProvider); ok {
+ value, found, err = p.GetS3DisableMultiRegionAccessPoints(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
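Because this package is internal to the SDK and not importable from application code, the resolution loop is mirrored here in a self-contained sketch to show the first-found-wins behavior; the staticConfig provider is invented for illustration.

```go
package main

import (
	"context"
	"fmt"
)

// useARNRegionProvider mirrors the UseARNRegionProvider interface above.
type useARNRegionProvider interface {
	GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error)
}

// staticConfig is a stand-in configuration source for this sketch.
type staticConfig struct{ value, found bool }

func (c staticConfig) GetS3UseARNRegion(context.Context) (bool, bool, error) {
	return c.value, c.found, nil
}

// resolveUseARNRegion mirrors ResolveUseARNRegion above: the first provider
// that reports found (or errors) ends the scan.
func resolveUseARNRegion(ctx context.Context, configs []interface{}) (value, found bool, err error) {
	for _, cfg := range configs {
		if p, ok := cfg.(useARNRegionProvider); ok {
			value, found, err = p.GetS3UseARNRegion(ctx)
			if err != nil || found {
				break
			}
		}
	}
	return
}

func main() {
	// The first provider reports "not found", so resolution falls through.
	configs := []interface{}{staticConfig{}, staticConfig{value: true, found: true}}
	v, ok, _ := resolveUseARNRegion(context.Background(), configs)
	fmt.Println(v, ok) // true true
}
```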
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
new file mode 100644
index 000000000..aa0c3714e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/endpoint_error.go
@@ -0,0 +1,183 @@
+package s3shared
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// TODO: fix these error statements to be relevant to v2 sdk
+
+const (
+ invalidARNErrorErrCode = "InvalidARNError"
+ configurationErrorErrCode = "ConfigurationError"
+)
+
+// InvalidARNError denotes the error for Invalid ARN
+type InvalidARNError struct {
+ message string
+ resource arn.Resource
+ origErr error
+}
+
+// Error returns the InvalidARN error string
+func (e InvalidARNError) Error() string {
+ var extra string
+ if e.resource != nil {
+ extra = "ARN: " + e.resource.String()
+ }
+ msg := invalidARNErrorErrCode + " : " + e.message
+ if extra != "" {
+ msg = msg + "\n\t" + extra
+ }
+
+ return msg
+}
+
+// Unwrap returns the original error wrapped by the Invalid ARN Error
+func (e InvalidARNError) Unwrap() error {
+ return e.origErr
+}
+
+// NewInvalidARNError denotes invalid arn error
+func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "invalid ARN",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition
+func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported for the target ARN partition",
+ origErr: err,
+ resource: resource,
+ }
+}
+
+// NewInvalidARNWithFIPSError ARN not supported for FIPS region
+//
+// Deprecated: FIPS will not appear in the ARN region component.
+func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
+ return InvalidARNError{
+ message: "resource ARN not supported for FIPS region",
+ resource: resource,
+ origErr: err,
+ }
+}
+
+// ConfigurationError is used to denote a client configuration error
+type ConfigurationError struct {
+ message string
+ resource arn.Resource
+ clientPartitionID string
+ clientRegion string
+ origErr error
+}
+
+// Error returns the Configuration error string
+func (e ConfigurationError) Error() string {
+ extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+ e.resource, e.clientPartitionID, e.clientRegion)
+
+ msg := configurationErrorErrCode + " : " + e.message
+ if extra != "" {
+ msg = msg + "\n\t" + extra
+ }
+ return msg
+}
+
+// Unwrap returns the original error wrapped by the Configuration Error
+func (e ConfigurationError) Unwrap() error {
+ return e.origErr
+}
+
+// NewClientPartitionMismatchError denotes a client partition mismatch error
+func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client partition does not match provided ARN partition",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientRegionMismatchError denotes cross region access error
+func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client region does not match provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewFailedToResolveEndpointError denotes endpoint resolving error
+func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "endpoint resolver failed to find an endpoint for the provided ARN region",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
+func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for fips but cross-region resource ARN provided",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS
+func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "use of ARN is not supported when client or request is configured for FIPS",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
+func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for S3 Accelerate but is not supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
+func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+		message: "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
+
+// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
+func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
+ return ConfigurationError{
+ message: "client configured for S3 Dual-stack but is not supported with resource ARN",
+ origErr: err,
+ resource: resource,
+ clientPartitionID: clientPartitionID,
+ clientRegion: clientRegion,
+ }
+}
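The Unwrap methods are what allow callers to use the standard errors package to look through these wrappers. A minimal, self-contained mirror of that behavior (the real types live in this internal package and are not importable from application code):

```go
package main

import (
	"errors"
	"fmt"
)

// invalidARNError mirrors the wrapping behavior of InvalidARNError above.
type invalidARNError struct {
	message string
	origErr error
}

func (e invalidARNError) Error() string { return "InvalidARNError : " + e.message }
func (e invalidARNError) Unwrap() error { return e.origErr }

func main() {
	cause := errors.New("resource ARN not supported for the target ARN partition")
	err := error(invalidARNError{message: "invalid ARN", origErr: cause})

	// Unwrap lets errors.Is see through the wrapper to the original cause.
	fmt.Println(errors.Is(err, cause)) // true
}
```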
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
new file mode 100644
index 000000000..a0129140c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package s3shared
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.18.15"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
new file mode 100644
index 000000000..85b60d2a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/host_id.go
@@ -0,0 +1,29 @@
+package s3shared
+
+import (
+ "github.com/aws/smithy-go/middleware"
+)
+
+// hostID is used to retrieve host id from response metadata
+type hostID struct {
+}
+
+// SetHostIDMetadata sets the provided host id over middleware metadata
+func SetHostIDMetadata(metadata *middleware.Metadata, id string) {
+ metadata.Set(hostID{}, id)
+}
+
+// GetHostIDMetadata retrieves the host id from middleware metadata.
+// It returns the host id as a string along with a boolean indicating the
+// presence of the hostId on middleware metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+ if !metadata.Has(hostID{}) {
+ return "", false
+ }
+
+ v, ok := metadata.Get(hostID{}).(string)
+ if !ok {
+ return "", true
+ }
+ return v, true
+}
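The hostID metadata key is package-private, so outside this package the host id is only reachable through the accessors above. This sketch mirrors the key type to show the set/get round trip on smithy-go's public Metadata type; the host id value is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// hostID mirrors the unexported metadata key defined above.
type hostID struct{}

func main() {
	var md middleware.Metadata
	md.Set(hostID{}, "AbCdEf123456") // placeholder value, as SetHostIDMetadata does

	// ...and the read side, as GetHostIDMetadata does.
	if md.Has(hostID{}) {
		v, ok := md.Get(hostID{}).(string)
		fmt.Println(v, ok) // AbCdEf123456 true
	}
}
```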
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
new file mode 100644
index 000000000..f02604cb6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata.go
@@ -0,0 +1,28 @@
+package s3shared
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// clonedInputKey used to denote if request input was cloned.
+type clonedInputKey struct{}
+
+// SetClonedInputKey sets a key on context to denote input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetClonedInputKey(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, clonedInputKey{}, value)
+}
+
+// IsClonedInput retrieves if context key for cloned input was set.
+// If set, we can infer that the request input was cloned previously.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func IsClonedInput(ctx context.Context) bool {
+ v, _ := middleware.GetStackValue(ctx, clonedInputKey{}).(bool)
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
new file mode 100644
index 000000000..7251588b0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go
@@ -0,0 +1,57 @@
+package s3shared
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const metadataRetrieverID = "S3MetadataRetriever"
+
+// AddMetadataRetrieverMiddleware adds request id, host id retriever middleware
+func AddMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+ // add metadata retriever middleware before operation deserializers so that it can retrieve metadata such as
+	// host id and request id from the response headers returned by operation deserializers
+ return stack.Deserialize.Insert(&metadataRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+type metadataRetriever struct {
+}
+
+// ID returns the middleware identifier
+func (m *metadataRetriever) ID() string {
+ return metadataRetrieverID
+}
+
+func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+
+ span, _ := tracing.GetSpan(ctx)
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // check for header for Request id
+ if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 {
+ // set reqID on metadata for successful responses.
+ awsmiddleware.SetRequestIDMetadata(&metadata, v)
+ span.SetProperty("aws.request_id", v)
+ }
+
+ // look up host-id
+ if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 {
+		// set host id on metadata for successful responses.
+ SetHostIDMetadata(&metadata, v)
+ span.SetProperty("aws.extended_request_id", v)
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
new file mode 100644
index 000000000..bee8da3fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/resource_request.go
@@ -0,0 +1,77 @@
+package s3shared
+
+import (
+ "fmt"
+ "strings"
+
+ awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+// ResourceRequest represents an ARN resource and api request metadata
+type ResourceRequest struct {
+ Resource arn.Resource
+ // RequestRegion is the region configured on the request config
+ RequestRegion string
+
+ // SigningRegion is the signing region resolved for the request
+ SigningRegion string
+
+ // PartitionID is the resolved partition id for the provided request region
+ PartitionID string
+
+ // UseARNRegion indicates if client should use the region provided in an ARN resource
+ UseARNRegion bool
+
+	// UseFIPS indicates if the client is configured for FIPS
+ UseFIPS bool
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+ return r.Resource.GetARN()
+}
+
+// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS
+//
+// Deprecated: FIPS will not be present in the ARN region
+func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
+ return IsFIPS(r.ARN().Region)
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+ return r.UseARNRegion
+}
+
+// IsCrossPartition returns true if the request is configured for a region of a different partition
+// than the partition the resource ARN region resolves to. IsCrossPartition will not return an error
+// if the request is not configured with a specific partition id. This might happen if the customer
+// provides a custom endpoint url but does not associate a partition id with it.
+func (r ResourceRequest) IsCrossPartition() (bool, error) {
+ rv := r.PartitionID
+ if len(rv) == 0 {
+ return false, nil
+ }
+
+ av := r.Resource.GetARN().Partition
+ if len(av) == 0 {
+ return false, fmt.Errorf("no partition id for provided ARN")
+ }
+
+ return !strings.EqualFold(rv, av), nil
+}
+
+// IsCrossRegion returns true if the request signing region is not the same as the arn region
+func (r ResourceRequest) IsCrossRegion() bool {
+ v := r.SigningRegion
+ return !strings.EqualFold(v, r.Resource.GetARN().Region)
+}
+
+// IsFIPS returns true if region is a fips pseudo-region
+//
+// Deprecated: FIPS should be specified via EndpointOptions.
+func IsFIPS(region string) bool {
+ return strings.HasPrefix(region, "fips-") ||
+ strings.HasSuffix(region, "-fips")
+}
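A self-contained sketch of the two checks above: the deprecated FIPS pseudo-region test and the case-insensitive cross-region comparison. The region values are placeholders.

```go
package main

import (
	"fmt"
	"strings"
)

// isFIPS mirrors IsFIPS above: a fips pseudo-region carries a "fips-" prefix
// or a "-fips" suffix.
func isFIPS(region string) bool {
	return strings.HasPrefix(region, "fips-") || strings.HasSuffix(region, "-fips")
}

func main() {
	fmt.Println(isFIPS("fips-us-gov-west-1")) // true
	fmt.Println(isFIPS("us-west-2"))          // false

	// Cross-region check as in IsCrossRegion: case-insensitive comparison of
	// the signing region against the ARN region.
	signingRegion, arnRegion := "us-west-2", "us-east-1"
	fmt.Println(!strings.EqualFold(signingRegion, arnRegion)) // true
}
```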
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
new file mode 100644
index 000000000..857336243
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error.go
@@ -0,0 +1,33 @@
+package s3shared
+
+import (
+ "errors"
+ "fmt"
+
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
+type ResponseError struct {
+ *awshttp.ResponseError
+
+ // HostID associated with response error
+ HostID string
+}
+
+// ServiceHostID returns the host id associated with Response Error
+func (e *ResponseError) ServiceHostID() string { return e.HostID }
+
+// Error returns the formatted error
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "https response error StatusCode: %d, RequestID: %s, HostID: %s, %v",
+ e.Response.StatusCode, e.RequestID, e.HostID, e.Err)
+}
+
+// As populates target and returns true if the type of target is an error type that
+// the ResponseError embeds (e.g. S3 HTTP ResponseError).
+func (e *ResponseError) As(target interface{}) bool {
+ return errors.As(e.ResponseError, target)
+}
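In application code, the usual way to reach these fields is errors.As against the public transport error type that this ResponseError embeds. A hedged sketch (the bucket name is a placeholder, and credentials are assumed to be available via the default chain):

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket name; expected to fail so the error path is exercised.
	_, err = client.HeadBucket(context.TODO(), &s3.HeadBucketInput{
		Bucket: aws.String("this-bucket-does-not-exist-placeholder"),
	})

	// The ResponseError above embeds *awshttp.ResponseError, so errors.As can
	// extract the HTTP status code and request id from a failed call.
	var re *awshttp.ResponseError
	if errors.As(err, &re) {
		log.Println(re.HTTPStatusCode(), re.ServiceRequestID())
	}
}
```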
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
new file mode 100644
index 000000000..543576245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/response_error_middleware.go
@@ -0,0 +1,60 @@
+package s3shared
+
+import (
+ "context"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddResponseErrorMiddleware adds response error wrapper middleware
+func AddResponseErrorMiddleware(stack *middleware.Stack) error {
+	// add error wrapper middleware before the metadata retriever middleware so that it can wrap the error response
+ // returned by operation deserializers
+ return stack.Deserialize.Insert(&errorWrapper{}, metadataRetrieverID, middleware.Before)
+}
+
+type errorWrapper struct {
+}
+
+// ID returns the middleware identifier
+func (m *errorWrapper) ID() string {
+ return "ResponseErrorWrapper"
+}
+
+func (m *errorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err == nil {
+ // Nothing to do when there is no error.
+ return out, metadata, err
+ }
+
+ resp, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ // No raw response to wrap with.
+ return out, metadata, err
+ }
+
+ // look for request id in metadata
+ reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata)
+ // look for host id in metadata
+ hostID, _ := GetHostIDMetadata(metadata)
+
+ // Wrap the returned smithy error with the request id retrieved from the metadata
+ err = &ResponseError{
+ ResponseError: &awshttp.ResponseError{
+ ResponseError: &smithyhttp.ResponseError{
+ Response: resp,
+ Err: err,
+ },
+ RequestID: reqID,
+ },
+ HostID: hostID,
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go
new file mode 100644
index 000000000..0f43ec0d4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/s3100continue.go
@@ -0,0 +1,54 @@
+package s3shared
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const s3100ContinueID = "S3100Continue"
+const default100ContinueThresholdBytes int64 = 1024 * 1024 * 2
+
+// Add100Continue adds a middleware which adds the {Expect: 100-continue} header for s3 client HTTP PUT requests larger than 2MB
+// or with unknown size streaming bodies, during the operation builder step
+func Add100Continue(stack *middleware.Stack, continueHeaderThresholdBytes int64) error {
+ return stack.Build.Add(&s3100Continue{
+ continueHeaderThresholdBytes: continueHeaderThresholdBytes,
+ }, middleware.After)
+}
+
+type s3100Continue struct {
+ continueHeaderThresholdBytes int64
+}
+
+// ID returns the middleware identifier
+func (m *s3100Continue) ID() string {
+ return s3100ContinueID
+}
+
+func (m *s3100Continue) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ sizeLimit := default100ContinueThresholdBytes
+ switch {
+ case m.continueHeaderThresholdBytes == -1:
+ return next.HandleBuild(ctx, in)
+ case m.continueHeaderThresholdBytes > 0:
+ sizeLimit = m.continueHeaderThresholdBytes
+ default:
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ if req.ContentLength == -1 || (req.ContentLength == 0 && req.Body != nil) || req.ContentLength >= sizeLimit {
+ req.Header.Set("Expect", "100-continue")
+ }
+
+ return next.HandleBuild(ctx, in)
+}
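A self-contained mirror of the decision made in HandleBuild above, showing which bodies get the Expect: 100-continue header under the default 2 MiB threshold and under the -1 opt-out:

```go
package main

import "fmt"

const defaultThreshold int64 = 2 * 1024 * 1024

// shouldExpectContinue mirrors the logic above: unknown length (-1),
// streaming bodies reported as 0 with a body present, or bodies at or over
// the threshold get "Expect: 100-continue"; a threshold of -1 disables it.
func shouldExpectContinue(contentLength int64, hasBody bool, threshold int64) bool {
	if threshold == -1 {
		return false
	}
	limit := defaultThreshold
	if threshold > 0 {
		limit = threshold
	}
	return contentLength == -1 || (contentLength == 0 && hasBody) || contentLength >= limit
}

func main() {
	fmt.Println(shouldExpectContinue(3<<20, true, 0))  // true: over default 2 MiB
	fmt.Println(shouldExpectContinue(1024, true, 0))   // false: small body
	fmt.Println(shouldExpectContinue(-1, true, 0))     // true: unknown length
	fmt.Println(shouldExpectContinue(5<<20, true, -1)) // false: disabled via -1
}
```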
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go
new file mode 100644
index 000000000..22773199f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/update_endpoint.go
@@ -0,0 +1,78 @@
+package s3shared
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+
+ awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
+)
+
+// EnableDualstack represents a middleware struct for enabling dualstack support
+//
+// Deprecated: See EndpointResolverOptions' UseDualStackEndpoint support
+type EnableDualstack struct {
+ // UseDualstack indicates if dualstack endpoint resolving is to be enabled
+ UseDualstack bool
+
+	// DefaultServiceID is the service id prefix used in endpoint resolving.
+	// By default the service id is 's3' for service s3, and 's3-control' for service s3control.
+ DefaultServiceID string
+}
+
+// ID returns the middleware ID.
+func (*EnableDualstack) ID() string {
+ return "EnableDualstack"
+}
+
+// HandleSerialize handles serializer middleware behavior when middleware is executed
+func (u *EnableDualstack) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+
+ // check for host name immutable property
+ if smithyhttp.GetHostnameImmutable(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ serviceID := awsmiddle.GetServiceID(ctx)
+
+	// s3-control may be represented as `S3 Control` in the model
+ if serviceID == "S3 Control" {
+ serviceID = "s3-control"
+ }
+
+ if len(serviceID) == 0 {
+ // default service id
+ serviceID = u.DefaultServiceID
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ if u.UseDualstack {
+ parts := strings.Split(req.URL.Host, ".")
+ if len(parts) < 3 {
+ return out, metadata, fmt.Errorf("unable to update endpoint host for dualstack, hostname invalid, %s", req.URL.Host)
+ }
+
+ for i := 0; i+1 < len(parts); i++ {
+ if strings.EqualFold(parts[i], serviceID) {
+ parts[i] = parts[i] + ".dualstack"
+ break
+ }
+ }
+
+ // construct the url host
+ req.URL.Host = strings.Join(parts, ".")
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
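A self-contained mirror of the host rewrite performed when UseDualstack is set: a ".dualstack" label is inserted after the service id in the endpoint hostname. The hostname below is the standard S3 regional endpoint, used here for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// dualstackHost mirrors the rewrite above: find the service label in the
// hostname and append ".dualstack" to it.
func dualstackHost(host, serviceID string) string {
	parts := strings.Split(host, ".")
	for i := 0; i+1 < len(parts); i++ {
		if strings.EqualFold(parts[i], serviceID) {
			parts[i] += ".dualstack"
			break
		}
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Println(dualstackHost("s3.us-west-2.amazonaws.com", "s3"))
	// s3.dualstack.us-west-2.amazonaws.com
}
```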
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go
new file mode 100644
index 000000000..65fd07e00
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/xml_utils.go
@@ -0,0 +1,89 @@
+package s3shared
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+}
+
+// GetUnwrappedErrorResponseComponents returns the error fields from an xml error response body
+func GetUnwrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) {
+ var errComponents ErrorComponents
+ if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err)
+ }
+ return errComponents, nil
+}
+
+// GetWrappedErrorResponseComponents returns the error fields from an xml error response body
+// in which error code, and message are wrapped by a tag
+func GetWrappedErrorResponseComponents(r io.Reader) (ErrorComponents, error) {
+ var errComponents struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+ HostID string `xml:"HostId"`
+ }
+
+ if err := xml.NewDecoder(r).Decode(&errComponents); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response : %w", err)
+ }
+
+ return ErrorComponents{
+ Code: errComponents.Code,
+ Message: errComponents.Message,
+ RequestID: errComponents.RequestID,
+ HostID: errComponents.HostID,
+ }, nil
+}
+
+// GetErrorResponseComponents retrieves error components according to passed in options
+func GetErrorResponseComponents(r io.Reader, options ErrorResponseDeserializerOptions) (ErrorComponents, error) {
+ var errComponents ErrorComponents
+ var err error
+
+ if options.IsWrappedWithErrorTag {
+ errComponents, err = GetWrappedErrorResponseComponents(r)
+ } else {
+ errComponents, err = GetUnwrappedErrorResponseComponents(r)
+ }
+
+ if err != nil {
+ return ErrorComponents{}, err
+ }
+
+	// If an error code or message is not retrieved, it is derived from the http status code,
+	// e.g. for the S3 service we derive the error code and message if none is found
+ if options.UseStatusCode && len(errComponents.Code) == 0 &&
+ len(errComponents.Message) == 0 {
+ // derive code and message from status code
+ statusText := http.StatusText(options.StatusCode)
+ errComponents.Code = strings.Replace(statusText, " ", "", -1)
+ errComponents.Message = statusText
+ }
+ return errComponents, nil
+}
+
+// ErrorResponseDeserializerOptions represents error response deserializer options for s3 and s3-control service
+type ErrorResponseDeserializerOptions struct {
+ // UseStatusCode denotes if status code should be used to retrieve error code, msg
+ UseStatusCode bool
+
+ // StatusCode is status code of error response
+ StatusCode int
+
+	// IsWrappedWithErrorTag represents whether the error response's code and msg are wrapped within an
+	// additional tag
+ IsWrappedWithErrorTag bool
+}
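A minimal sketch of the wrapped-error decoding that GetWrappedErrorResponseComponents performs, using only encoding/xml; the XML body is a made-up example in the S3 wrapped style.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	// Illustrative wrapped error body: Code and Message nested under <Error>.
	body := `<ErrorResponse>
  <Error><Code>NoSuchBucket</Code><Message>The bucket does not exist</Message></Error>
  <RequestId>ABC123</RequestId>
  <HostId>host-xyz</HostId>
</ErrorResponse>`

	// Same field tags as the anonymous struct above: "Error>Code" tells the
	// decoder to look one level deeper for the wrapped fields.
	var ec struct {
		Code      string `xml:"Error>Code"`
		Message   string `xml:"Error>Message"`
		RequestID string `xml:"RequestId"`
		HostID    string `xml:"HostId"`
	}
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&ec); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", ec)
	// {Code:NoSuchBucket Message:The bucket does not exist RequestID:ABC123 HostID:host-xyz}
}
```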
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
new file mode 100644
index 000000000..9f27d13db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
@@ -0,0 +1,830 @@
+# v1.78.2 (2025-03-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.78.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.78.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.77.1 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.77.0 (2025-02-14)
+
+* **Feature**: Added support for Content-Range header in HeadObject response.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.76.1 (2025-02-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.76.0 (2025-02-06)
+
+* **Feature**: Updated list of the valid AWS Region values for the LocationConstraint parameter for general purpose buckets.
+
+# v1.75.4 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.75.3 (2025-02-04)
+
+* No change notes available for this release.
+
+# v1.75.2 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.75.1 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.75.0 (2025-01-29)
+
+* **Feature**: Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long.
+
+# v1.74.1 (2025-01-24)
+
+* **Bug Fix**: Enable request checksum validation mode by default
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.74.0 (2025-01-22)
+
+* **Feature**: Add a client config option to disable logging when output checksum validation is skipped due to an unsupported algorithm.
+
+# v1.73.2 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.73.1 (2025-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.73.0 (2025-01-15)
+
+* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION.
+* **Feature**: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.72.3 (2025-01-14)
+
+* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information.
+
+# v1.72.2 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.72.1 (2025-01-08)
+
+* No change notes available for this release.
+
+# v1.72.0 (2025-01-03)
+
+* **Feature**: This change is only for updating the model regexp of CopySource which is not for validation but only for documentation and user guide change.
+
+# v1.71.1 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.71.0 (2024-12-03.2)
+
+* **Feature**: Amazon S3 Metadata stores object metadata in read-only, fully managed Apache Iceberg metadata tables that you can query. You can create metadata table configurations for S3 general purpose buckets.
+
+# v1.70.0 (2024-12-02)
+
+* **Feature**: Amazon S3 introduces support for AWS Dedicated Local Zones
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.69.0 (2024-11-25)
+
+* **Feature**: Amazon Simple Storage Service / Features: Add support for ETag based conditional writes in PutObject and CompleteMultiPartUpload APIs to prevent unintended object modifications.
+
+# v1.68.0 (2024-11-21)
+
+* **Feature**: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API.
+
+# v1.67.1 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.67.0 (2024-11-14)
+
+* **Feature**: This release updates the ListBuckets API Reference documentation in support of the new 10,000 general purpose bucket default quota on all AWS accounts. To increase your bucket quota from 10,000 to up to 1 million buckets, simply request a quota increase via Service Quotas.
+
+# v1.66.3 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.66.2 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.66.1 (2024-10-25)
+
+* **Bug Fix**: Update presign post URL resolution to use the exact result from EndpointResolverV2
+
+# v1.66.0 (2024-10-16)
+
+* **Feature**: Add support for the new optional bucket-region and prefix query parameters in the ListBuckets API. For ListBuckets requests that express pagination, Amazon S3 will now return both the bucket names and associated AWS regions in the response.
+
+# v1.65.3 (2024-10-11)
+
+* **Bug Fix**: **BREAKING CHANGE**: S3 ReplicationRuleFilter and LifecycleRuleFilter shapes are being changed from union to structure types
+
+# v1.65.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.65.1 (2024-10-07)
+
+* **Bug Fix**: **CHANGE IN BEHAVIOR**: Allow serialization of headers with empty string for prefix headers. We are deploying this fix because the behavior is actively preventing users from transmitting keys with empty values to the service. If you were setting metadata keys with empty values before this change, they will now actually be sent to the service.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.65.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.64.1 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.64.0 (2024-10-02)
+
+* **Feature**: This release introduces a header representing the minimum object size limit for Lifecycle transitions.
+
+# v1.63.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.63.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.63.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.63.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.62.0 (2024-09-18)
+
+* **Feature**: Added SSE-KMS support for directory buckets.
+
+# v1.61.3 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.61.2 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.61.1 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.61.0 (2024-08-28)
+
+* **Feature**: Add presignPost for s3 PutObject
+
+# v1.60.1 (2024-08-22)
+
+* No change notes available for this release.
+
+# v1.60.0 (2024-08-20)
+
+* **Feature**: Amazon Simple Storage Service / Features : Add support for conditional writes for PutObject and CompleteMultipartUpload APIs.
+
+# v1.59.0 (2024-08-15)
+
+* **Feature**: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API.
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.58.3 (2024-08-02)
+
+* **Bug Fix**: Add assurance tests for auth scheme selection logic.
+
+# v1.58.2 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.58.1 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.58.0 (2024-07-02)
+
+* **Feature**: Added response overrides to Head Object requests.
+
+# v1.57.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.57.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.56.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.56.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.55.2 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.55.1 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.55.0 (2024-06-05)
+
+* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality.
+* **Bug Fix**: Add S3-specific smithy protocol tests.
+
+# v1.54.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.54.3 (2024-05-23)
+
+* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK).
+
+# v1.54.2 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.54.1 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.54.0 (2024-05-14)
+
+* **Feature**: Updated a few x-id in the http uri traits
+
+# v1.53.2 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.53.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.53.0 (2024-03-18)
+
+* **Feature**: Fix two issues with response root node names.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.52.1 (2024-03-15)
+
+* **Documentation**: Documentation updates for Amazon S3.
+
+# v1.52.0 (2024-03-13)
+
+* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT).
+
+# v1.51.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.3 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.2 (2024-03-04)
+
+* **Bug Fix**: Update internal/presigned-url dependency for corrected API name.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.51.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.50.3 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.50.2 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.50.1 (2024-02-19)
+
+* **Bug Fix**: Prevent potential panic caused by invalid comparison of credentials.
+
+# v1.50.0 (2024-02-16)
+
+* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters.
+
+# v1.49.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.48.1 (2024-01-24)
+
+* No change notes available for this release.
+
+# v1.48.0 (2024-01-05)
+
+* **Feature**: Support smithy sigv4a trait for codegen.
+
+# v1.47.8 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.7 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.47.6 (2023-12-18)
+
+* No change notes available for this release.
+
+# v1.47.5 (2023-12-08)
+
+* **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver.
+* **Bug Fix**: Improve uniqueness of default S3Express session credentials cache keying to prevent collision in multi-credential scenarios.
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.47.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.47.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.46.0 (2023-11-28.2)
+
+* **Feature**: Add S3Express support.
+* **Feature**: Adds support for S3 Express One Zone.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.45.1 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.45.0 (2023-11-27)
+
+* **Feature**: Adding new params - Key and Prefix, to S3 API operations for supporting S3 Access Grants. Note - These updates will not change any of the existing S3 API functionality.
+
+# v1.44.0 (2023-11-21)
+
+* **Feature**: Add support for automatic date based partitioning in S3 Server Access Logs.
+* **Bug Fix**: Don't send MaxKeys/MaxUploads=0 when unspecified in ListObjectVersions and ListMultipartUploads paginators.
+
+# v1.43.1 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.43.0 (2023-11-17)
+
+* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162.
+* **Feature**: Removes all default 0 values for numbers and false values for booleans
+
+# v1.42.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.42.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.42.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.41.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.0 (2023-09-26)
+
+* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK.
+
+# v1.39.0 (2023-09-20)
+
+* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException
+
+# v1.38.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.38.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.1 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.0 (2023-07-13)
+
+* **Feature**: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.0 (2023-06-28)
+
+* **Feature**: The S3 ListObjects, ListObjectsV2 and ListObjectVersions APIs now support a new optional header x-amz-optional-object-attributes. If the header contains RestoreStatus as the value, then S3 will include the Glacier restore status, i.e. isRestoreInProgress and RestoreExpiryDate, in the List response.
+
+# v1.35.0 (2023-06-16)
+
+* **Feature**: This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs.
+
+# v1.34.1 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.34.0 (2023-06-13)
+
+* **Feature**: Integrate double encryption feature to SDKs.
+* **Bug Fix**: Fix HeadObject to return types.NotFound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.1 (2023-05-04)
+
+* **Documentation**: Documentation updates for Amazon S3
+
+# v1.33.0 (2023-04-24)
+
+* **Feature**: added custom paginators for listMultipartUploads and ListObjectVersions
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.0 (2023-04-19)
+
+* **Feature**: Provides support for "Snow" Storage class.
+
+# v1.31.3 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.31.2 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.1 (2023-03-31)
+
+* **Documentation**: Documentation updates for Amazon S3
+
+# v1.31.0 (2023-03-21)
+
+* **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.6 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.5 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.30.4 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.3 (2023-02-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.2 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2023-01-23)
+
+* No change notes available for this release.
+
+# v1.30.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.29.6 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.5 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.4 (2022-11-22)
+
+* No change notes available for this release.
+
+# v1.29.3 (2022-11-16)
+
+* No change notes available for this release.
+
+# v1.29.2 (2022-11-10)
+
+* No change notes available for this release.
+
+# v1.29.1 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2022-10-21)
+
+* **Feature**: S3 on Outposts launches support for automatic bucket-style alias. You can use the automatic access point alias instead of an access point ARN for any object-level operation in an Outposts bucket.
+* **Bug Fix**: The SDK client has been updated to utilize the `aws.IsCredentialsProvider` function for determining if `aws.AnonymousCredentials` has been configured for the `CredentialProvider`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2022-10-19)
+
+* **Feature**: Updates internal logic for constructing API endpoints. We have added rule-based endpoints and internal model parameters.
+
+# v1.27.11 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.10 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.9 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.8 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2022-08-30)
+
+* No change notes available for this release.
+
+# v1.27.6 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.2 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2022-07-01)
+
+* **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076).
+
+# v1.26.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.11 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.10 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.9 (2022-05-06)
+
+* No change notes available for this release.
+
+# v1.26.8 (2022-05-03)
+
+* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs.
+
+# v1.26.7 (2022-04-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2022-04-12)
+
+* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
+
+# v1.26.4 (2022-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2022-01-28)
+
+* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
+
+# v1.24.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token and stopping on empty string tokens (see the sketch below).
+* **Feature**: Updated to latest service endpoints
+
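+A minimal, hedged paginator sketch; `client` (an existing `*s3.Client`), `ctx`, and the usual `aws`/`fmt`/`log` imports are assumed, and the bucket name is a placeholder:
+
+```go
+p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+	Bucket: aws.String("amzn-s3-demo-bucket"),
+})
+for p.HasMorePages() {
+	page, err := p.NextPage(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, obj := range page.Contents {
+		fmt.Println(aws.ToString(obj.Key))
+	}
+}
+```
+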
+# v1.21.0 (2021-12-02)
+
+* **Feature**: API client updated
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2021-11-30)
+
+* **Feature**: API client updated
+
+# v1.19.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2021-11-12)
+
+* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation (see the sketch below). Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.
+
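+A hedged sketch of `WaitForOutput` on an S3 waiter; `client`, `ctx`, the usual imports, and the bucket/key values are assumptions:
+
+```go
+waiter := s3.NewObjectExistsWaiter(client)
+out, err := waiter.WaitForOutput(ctx, &s3.HeadObjectInput{
+	Bucket: aws.String("amzn-s3-demo-bucket"),
+	Key:    aws.String("example-key"),
+}, 2*time.Minute)
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Println("object size:", aws.ToInt64(out.ContentLength))
+```
+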
+# v1.18.0 (2021-11-06)
+
+* **Feature**: Support has been added for the SelectObjectContent API.
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically (see the sketch below).
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Feature**: Updated service to latest API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
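+A hedged sketch of enabling the FIPS and dual-stack endpoints programmatically; the same settings can come from `AWS_USE_FIPS_ENDPOINT` / `AWS_USE_DUALSTACK_ENDPOINT` or shared configuration (`ctx` and the usual imports assumed):
+
+```go
+cfg, err := config.LoadDefaultConfig(ctx,
+	config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
+	config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled),
+)
+if err != nil {
+	log.Fatal(err)
+}
+client := s3.NewFromConfig(cfg)
+_ = client
+```
+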
+# v1.17.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Feature**: Updates the S3 streaming operations (PutObject, UploadPart, WriteGetObjectResponse) to use unsigned-payload signing when TLS is enabled.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2021-09-17)
+
+* **Feature**: Updated API client and endpoints to latest revision.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2021-09-10)
+
+* No change notes available for this release.
+
+# v1.15.0 (2021-09-02)
+
+* **Feature**: API client updated
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs (see the sketch below).
+* **Dependency Update**: Updated to the latest SDK module versions
+
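+A hedged sketch of addressing a Multi-Region Access Point: the MRAP ARN goes in the `Bucket` member (the account ID and alias below are placeholders; `client`, `ctx`, and imports assumed), and the request is signed with SigV4A:
+
+```go
+out, err := client.GetObject(ctx, &s3.GetObjectInput{
+	Bucket: aws.String("arn:aws:s3::111122223333:accesspoint/mfzwi23gnjvgw.mrap"),
+	Key:    aws.String("example-key"),
+})
+if err != nil {
+	log.Fatal(err)
+}
+defer out.Body.Close()
+```
+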
+# v1.14.0 (2021-08-27)
+
+* **Feature**: Updated API model to latest revision.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2021-08-19)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2021-08-04)
+
+* **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346))
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-06-04)
+
+* **Feature**: The handling of AccessPoint and Outpost ARNs has been updated.
+* **Feature**: Updated service client to latest API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-05-25)
+
+* **Feature**: API client updated
+
+# v1.8.0 (2021-05-20)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-05-14)
+
+* **Feature**: A constant has been added to modules to enable runtime version inspection for reporting.
+* **Feature**: Updated to latest service API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
new file mode 100644
index 000000000..40035a43d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
@@ -0,0 +1,1369 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/v4a"
+ acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithydocument "github.com/aws/smithy-go/document"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net"
+ "net/http"
+ "sync/atomic"
+ "time"
+)
+
+const ServiceID = "S3"
+const ServiceAPIVersion = "2006-03-01"
+
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
+}
+
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
+
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
+
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
+
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ }
+}
+
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
+
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/s3")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
+}
+
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
+
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
+
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/s3")
+}
+
+// Client provides the API client to make operations call for Amazon Simple
+// Storage Service.
+type Client struct {
+ options Options
+
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ setResolvedDefaultsMode(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveEndpointResolverV2(&options)
+
+ resolveHTTPSignerV4a(&options)
+
+ resolveTracerProvider(&options)
+
+ resolveMeterProvider(&options)
+
+ resolveAuthSchemeResolver(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ resolveExpressCredentials(&options)
+
+ finalizeServiceEndpointAuthResolver(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
+ }
+
+ finalizeExpressCredentials(&options, client)
+
+ initializeTimeOffsetResolver(client)
+
+ return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
+}
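+
+// Illustrative note (not generated code): a per-operation override is passed as
+// a trailing functional option on the call itself, for example:
+//
+//	out, err := client.ListBuckets(ctx, &ListBucketsInput{}, func(o *Options) {
+//		o.RetryMaxAttempts = 5 // applies to this call only
+//	})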
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ setSafeEventStreamClientLogMode(&options, opID)
+
+ finalizeOperationRetryMaxAttempts(&options, *c)
+
+ finalizeClientEndpointResolverOptions(&options)
+
+ finalizeOperationExpressCredentials(&options, *c)
+
+ finalizeOperationEndpointAuthResolver(&options)
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
+ if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
+ return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{
+ Signer: options.httpSignerV4a,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+ if len(o.resolvedDefaultsMode) > 0 {
+ return
+ }
+
+ var mode aws.DefaultsMode
+ mode.SetFromString(string(o.DefaultsMode))
+
+ if mode == aws.DefaultsModeAuto {
+ mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+ }
+
+ o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ DefaultsMode: cfg.DefaultsMode,
+ RuntimeEnvironment: cfg.RuntimeEnvironment,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
+ RequestChecksumCalculation: cfg.RequestChecksumCalculation,
+ ResponseChecksumValidation: cfg.ResponseChecksumValidation,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSRetryMaxAttempts(cfg, &opts)
+ resolveAWSRetryMode(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ resolveUseARNRegion(cfg, &opts)
+ resolveDisableMultiRegionAccessPoints(cfg, &opts)
+ resolveDisableExpressAuth(cfg, &opts)
+ resolveUseDualStackEndpoint(cfg, &opts)
+ resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
+ return New(opts, optFns...)
+}
+
+func resolveHTTPClient(o *Options) {
+ var buildable *awshttp.BuildableClient
+
+ if o.HTTPClient != nil {
+ var ok bool
+ buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return
+ }
+ } else {
+ buildable = awshttp.NewBuildableClient()
+ }
+
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+ if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+ dialer.Timeout = dialerTimeout
+ }
+ })
+
+ buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+ if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+ transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+ }
+ })
+ }
+
+ o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+
+ if len(o.RetryMode) == 0 {
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ o.RetryMode = modeConfig.RetryMode
+ }
+ }
+ if len(o.RetryMode) == 0 {
+ o.RetryMode = aws.RetryModeStandard
+ }
+
+ var standardOptions []func(*retry.StandardOptions)
+ if v := o.RetryMaxAttempts; v != 0 {
+ standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+ so.MaxAttempts = v
+ })
+ }
+
+ switch o.RetryMode {
+ case aws.RetryModeAdaptive:
+ var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+ if len(standardOptions) != 0 {
+ adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+ ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+ })
+ }
+ o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+ default:
+ o.Retryer = retry.NewStandard(standardOptions...)
+ }
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+ if len(cfg.RetryMode) == 0 {
+ return
+ }
+ o.RetryMode = cfg.RetryMode
+}
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+ if cfg.RetryMaxAttempts == 0 {
+ return
+ }
+ o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+ if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ so.DisableURIPathEscaping = true
+ })
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
+}
+
+// resolves UseARNRegion S3 configuration
+func resolveUseARNRegion(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := s3sharedconfig.ResolveUseARNRegion(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.UseARNRegion = value
+ }
+ return nil
+}
+
+// resolves DisableMultiRegionAccessPoints S3 configuration
+func resolveDisableMultiRegionAccessPoints(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := s3sharedconfig.ResolveDisableMultiRegionAccessPoints(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.DisableMultiRegionAccessPoints = value
+ }
+ return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseDualStackEndpoint = value
+ }
+ return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseFIPSEndpoint = value
+ }
+ return nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
+type httpSignerV4a interface {
+ SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash,
+ service string, regionSet []string, signingTime time.Time,
+ optFns ...func(*v4a.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4a(o *Options) {
+ if o.httpSignerV4a != nil {
+ return
+ }
+ o.httpSignerV4a = newDefaultV4aSigner(*o)
+}
+
+func newDefaultV4aSigner(o Options) *v4a.Signer {
+ return v4a.NewSigner(func(so *v4a.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+func addRequestChecksumMetricsTracking(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ return stack.Build.Insert(&internalChecksum.RequestChecksumMetricsTracking{
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ UserAgent: ua,
+ }, "UserAgent", middleware.Before)
+}
+
+func addResponseChecksumMetricsTracking(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ return stack.Build.Insert(&internalChecksum.ResponseChecksumMetricsTracking{
+ ResponseChecksumValidation: options.ResponseChecksumValidation,
+ UserAgent: ua,
+ }, "UserAgent", middleware.Before)
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+func addMetadataRetrieverMiddleware(stack *middleware.Stack) error {
+ return s3shared.AddMetadataRetrieverMiddleware(stack)
+}
+
+func add100Continue(stack *middleware.Stack, options Options) error {
+ return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes)
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+// ComputedInputChecksumsMetadata provides information about the algorithms used
+// to compute the checksum(s) of the input payload.
+type ComputedInputChecksumsMetadata struct {
+ // ComputedChecksums is a map of algorithm name to checksum value of the computed
+ // input payload's checksums.
+ ComputedChecksums map[string]string
+}
+
+// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of
+// algorithms and input payload checksums values.
+func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) {
+ values, ok := internalChecksum.GetComputedInputChecksums(m)
+ if !ok {
+ return ComputedInputChecksumsMetadata{}, false
+ }
+ return ComputedInputChecksumsMetadata{
+ ComputedChecksums: values,
+ }, true
+
+}
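+
+// Illustrative usage (not generated code): operation outputs carry the result
+// metadata, e.g. for PutObject:
+//
+//	out, err := client.PutObject(ctx, params)
+//	if sums, ok := GetComputedInputChecksumsMetadata(out.ResultMetadata); ok {
+//		fmt.Println(sums.ComputedChecksums) // algorithm -> checksum value
+//	}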
+
+func addInputChecksumMiddleware(stack *middleware.Stack, options internalChecksum.InputMiddlewareOptions) (err error) {
+ err = stack.Initialize.Add(&internalChecksum.SetupInputContext{
+ GetAlgorithm: options.GetAlgorithm,
+ RequireChecksum: options.RequireChecksum,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ }, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ stack.Build.Remove("ContentChecksum")
+
+ inputChecksum := &internalChecksum.ComputeInputPayloadChecksum{
+ EnableTrailingChecksum: options.EnableTrailingChecksum,
+ EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash,
+ EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
+ }
+ if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil {
+ return err
+ }
+
+ if options.EnableTrailingChecksum {
+ trailerMiddleware := &internalChecksum.AddInputChecksumTrailer{
+ EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum,
+ EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash,
+ EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader,
+ }
+ if err := stack.Finalize.Insert(trailerMiddleware, inputChecksum.ID(), middleware.After); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ChecksumValidationMetadata contains metadata such as the checksum algorithm
+// used for data integrity validation.
+type ChecksumValidationMetadata struct {
+ // AlgorithmsUsed is the set of the checksum algorithms used to validate the
+ // response payload. The response payload must be completely read in order for the
+ // checksum validation to be performed. An error is returned by the operation
+ // output's response io.ReadCloser if the computed checksums are invalid.
+ AlgorithmsUsed []string
+}
+
+// GetChecksumValidationMetadata returns the set of algorithms that will be used
+// to validate the response payload with. The response payload must be completely
+// read in order for the checksum validation to be performed. An error is returned
+// by the operation output's response io.ReadCloser if the computed checksums are
+// invalid. Returns false if no checksum algorithm used metadata was found.
+func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) {
+ values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m)
+ if !ok {
+ return ChecksumValidationMetadata{}, false
+ }
+ return ChecksumValidationMetadata{
+ AlgorithmsUsed: append(make([]string, 0, len(values)), values...),
+ }, true
+
+}
+
+// nopGetBucketAccessor is a no-op accessor for operations that don't support a
+// bucket member as input
+func nopGetBucketAccessor(input interface{}) (*string, bool) {
+ return nil, false
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return s3shared.AddResponseErrorMiddleware(stack)
+}
+
+func disableAcceptEncodingGzip(stack *middleware.Stack) error {
+ return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{})
+}
+
+// ResponseError provides the HTTP centric error type wrapping the underlying
+// error with the HTTP response value and the deserialized RequestID.
+type ResponseError interface {
+ error
+
+ ServiceHostID() string
+ ServiceRequestID() string
+}
+
+var _ ResponseError = (*s3shared.ResponseError)(nil)
+
+// GetHostIDMetadata retrieves the host id from middleware metadata returns host
+// id as string along with a boolean indicating presence of hostId on middleware
+// metadata.
+func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) {
+ return s3shared.GetHostIDMetadata(metadata)
+}
+
+// HTTPPresignerV4 represents the presigner interface used by the presign URL client
+type HTTPPresignerV4 interface {
+ PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*v4.SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// httpPresignerV4a represents the sigv4a presigner interface used by the
+// presign URL client
+type httpPresignerV4a interface {
+ PresignHTTP(
+ ctx context.Context, credentials v4a.Credentials, r *http.Request,
+ payloadHash string, service string, regionSet []string, signingTime time.Time,
+ optFns ...func(*v4a.SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignOptions represents the presign client options
+type PresignOptions struct {
+
+ // ClientOptions are list of functional options to mutate client options used by
+ // the presign client.
+ ClientOptions []func(*Options)
+
+ // Presigner is the presigner used by the presign url client
+ Presigner HTTPPresignerV4
+
+	// Expires sets the expiration duration for the generated presigned URL. This
+	// is the duration the presigned URL is considered valid for. If not set or
+	// set to zero, the presigned URL defaults to expiring after 900 seconds.
+ Expires time.Duration
+
+ // presignerV4a is the presigner used by the presign url client
+ presignerV4a httpPresignerV4a
+}
+
+func (o PresignOptions) copy() PresignOptions {
+ clientOptions := make([]func(*Options), len(o.ClientOptions))
+ copy(clientOptions, o.ClientOptions)
+ o.ClientOptions = clientOptions
+ return o
+}
+
+// WithPresignClientFromClientOptions is a helper utility to retrieve a function
+// that takes PresignOptions as input
+func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) {
+ return withPresignClientFromClientOptions(optFns).options
+}
+
+type withPresignClientFromClientOptions []func(*Options)
+
+func (w withPresignClientFromClientOptions) options(o *PresignOptions) {
+ o.ClientOptions = append(o.ClientOptions, w...)
+}
+
+// WithPresignExpires is a helper utility that returns a functional option
+// setting the Expires value on presign options
+func WithPresignExpires(dur time.Duration) func(*PresignOptions) {
+ return withPresignExpires(dur).options
+}
+
+type withPresignExpires time.Duration
+
+func (w withPresignExpires) options(o *PresignOptions) {
+ o.Expires = time.Duration(w)
+}
+
+// PresignClient represents the presign url client
+type PresignClient struct {
+ client *Client
+ options PresignOptions
+}
+
+// NewPresignClient generates a presign client using provided API Client and
+// presign options
+func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient {
+ var options PresignOptions
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ if len(options.ClientOptions) != 0 {
+ c = New(c.options, options.ClientOptions...)
+ }
+
+ if options.Presigner == nil {
+ options.Presigner = newDefaultV4Signer(c.options)
+ }
+
+ if options.presignerV4a == nil {
+ options.presignerV4a = newDefaultV4aSigner(c.options)
+ }
+
+ return &PresignClient{
+ client: c,
+ options: options,
+ }
+}
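+
+// Illustrative example (not generated code; bucket and key are placeholders):
+//
+//	psc := NewPresignClient(client, WithPresignExpires(15*time.Minute))
+//	req, err := psc.PresignGetObject(ctx, &GetObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	// req.URL is the signed URL; req.SignedHeader lists headers the caller must send.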
+
+func withNopHTTPClientAPIOption(o *Options) {
+ o.HTTPClient = smithyhttp.NopClient{}
+}
+
+type presignContextPolyfillMiddleware struct {
+}
+
+func (*presignContextPolyfillMiddleware) ID() string {
+ return "presignContextPolyfill"
+}
+
+func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ schemeID := rscheme.Scheme.SchemeID()
+ ctx = s3cust.SetSignerVersion(ctx, schemeID)
+ if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" {
+ if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningName(ctx, sn)
+ }
+ if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningRegion(ctx, sr)
+ }
+ } else if schemeID == "aws.auth#sigv4a" {
+ if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningName(ctx, sn)
+ }
+ if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningRegion(ctx, sr[0])
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+type presignConverter PresignOptions
+
+func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+ if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
+ stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+ }
+ stack.Deserialize.Clear()
+ stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
+ stack.Build.Remove("UserAgent")
+ if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil {
+ return err
+ }
+
+ pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: options.Credentials,
+ Presigner: c.Presigner,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ })
+ if _, err := stack.Finalize.Swap("Signing", pmw); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
+ return err
+ }
+
+ // extended s3 presigning
+ signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: options.Credentials,
+ ExpressCredentials: options.ExpressCredentials,
+ V4Presigner: c.Presigner,
+ V4aPresigner: c.presignerV4a,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ })
+ err = s3cust.RegisterPreSigningMiddleware(stack, signermv)
+ if err != nil {
+ return err
+ }
+
+ if c.Expires < 0 {
+ return fmt.Errorf("presign URL duration must be 0 or greater, %v", c.Expires)
+ }
+ // add middleware to set expiration for s3 presigned url, if expiration is set to
+ // 0, this middleware sets a default expiration of 900 seconds
+ err = stack.Build.Add(&s3cust.AddExpiresOnPresignedURL{Expires: c.Expires}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func withNoDefaultChecksumAPIOption(options *Options) {
+ options.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
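+
+// An illustrative sketch: the spans started above surface through the
+// client's TracerProvider option. Assuming the OpenTelemetry adapter in
+// github.com/aws/smithy-go/tracing/smithyoteltracing and the
+// go.opentelemetry.io/otel package, wiring looks like:
+//
+//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		o.TracerProvider = smithyoteltracing.Adapt(otel.GetTracerProvider())
+//	})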
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
new file mode 100644
index 000000000..3caa7dd7b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go
@@ -0,0 +1,359 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// This operation aborts a multipart upload. After a multipart upload is aborted,
+// no additional parts can be uploaded using that upload ID. The storage consumed
+// by any previously uploaded parts will be freed. However, if any part uploads are
+// currently in progress, those part uploads might or might not succeed. As a
+// result, it might be necessary to abort a given multipart upload multiple times
+// in order to completely free all storage consumed by all parts.
+//
+// To verify that all parts have been removed and prevent getting charged for the
+// part storage, you should call the [ListParts]API operation and ensure that the parts list
+// is empty.
+//
+// - Directory buckets - If multipart uploads in a directory bucket are in
+// progress, you can't delete the bucket until all the in-progress multipart
+// uploads are aborted or completed. To delete these in-progress multipart uploads,
+// use the ListMultipartUploads operation to list the in-progress multipart
+// uploads in the bucket and use the AbortMultipartUpload operation to abort all
+// the in-progress multipart uploads.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI or SDKs create and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to AbortMultipartUpload :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) {
+ if params == nil {
+ params = &AbortMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AbortMultipartUpload", params, optFns, c.addOperationAbortMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AbortMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
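+
+// An illustrative sketch of a typical call; bucket, key, and upload ID are
+// placeholders, and client is an assumed *Client.
+//
+//	_, err := client.AbortMultipartUpload(context.TODO(), &s3.AbortMultipartUploadInput{
+//		Bucket:   aws.String("amzn-s3-demo-bucket"),
+//		Key:      aws.String("example-key"),
+//		UploadId: aws.String("example-upload-id"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}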
+
+type AbortMultipartUploadInput struct {
+
+ // The bucket name to which the upload was taking place.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key of the object for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Upload ID that identifies the multipart upload.
+ //
+ // This member is required.
+ UploadId *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+	// If present, this header aborts an in-progress multipart upload only if it
+	// was initiated on the provided timestamp. If the initiated timestamp of the multipart
+ // upload does not match the provided value, the operation returns a 412
+ // Precondition Failed error. If the initiated timestamp matches or if the
+ // multipart upload doesn’t exist, the operation returns a 204 Success (No Content)
+ // response.
+ //
+ // This functionality is only supported for directory buckets.
+ IfMatchInitiatedTime *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ noSmithyDocumentSerde
+}
+
+func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type AbortMultipartUploadOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpAbortMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AbortMultipartUpload"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAbortMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *AbortMultipartUploadInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "AbortMultipartUpload",
+ }
+}
+
+// getAbortMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getAbortMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*AbortMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addAbortMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getAbortMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
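+
+// An illustrative sketch of the ListParts verification recommended in the
+// operation documentation above (placeholder identifiers; client is an
+// assumed *Client).
+//
+//	parts, err := client.ListParts(context.TODO(), &s3.ListPartsInput{
+//		Bucket:   aws.String("amzn-s3-demo-bucket"),
+//		Key:      aws.String("example-key"),
+//		UploadId: aws.String("example-upload-id"),
+//	})
+//	if err == nil && len(parts.Parts) == 0 {
+//		// All parts have been freed; the abort fully released storage.
+//	}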
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
new file mode 100644
index 000000000..ef4198eed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go
@@ -0,0 +1,643 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Completes a multipart upload by assembling previously uploaded parts.
+//
+// You first initiate the multipart upload and then upload all parts using the [UploadPart]
+// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of
+// an upload, you call this CompleteMultipartUpload operation to complete the
+// upload. Upon receiving this request, Amazon S3 concatenates all the parts in
+// ascending order by part number to create a new object. In the
+// CompleteMultipartUpload request, you must provide the parts list and ensure that
+// the parts list is complete. The CompleteMultipartUpload API operation
+// concatenates the parts that you provide in the list. For each part in the list,
+// you must provide the PartNumber value and the ETag value that are returned
+// after that part was uploaded.
+//
+// The processing of a CompleteMultipartUpload request could take several minutes
+// to finalize. After Amazon S3 begins processing the request, it sends an HTTP
+// response header that specifies a 200 OK response. While processing is in
+// progress, Amazon S3 periodically sends white space characters to keep the
+// connection from timing out. A request could fail after the initial 200 OK
+// response has been sent. This means that a 200 OK response can contain either a
+// success or an error. The error response might be embedded in the 200 OK
+// response. If you call this API operation directly, make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect
+// the embedded error and apply error handling per your configuration settings
+// (including automatically retrying the request as appropriate). If the condition
+// persists, the SDKs throw an exception (or, for the SDKs that don't use
+// exceptions, they return an error).
+//
+// Note that if CompleteMultipartUpload fails, applications should be prepared to
+// retry any failed requests (including 500 error responses). For more information,
+// see [Amazon S3 Error Best Practices].
+//
+// You can't use Content-Type: application/x-www-form-urlencoded for the
+// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
+// header, CompleteMultipartUpload can still return a 200 OK response.
+//
+// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide.
+//
+// If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted
+//
+// with Key Management Service, you must have permission to use the kms:Decrypt
+// action for the CompleteMultipartUpload request to succeed.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI or SDKs create and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// If the object is encrypted with SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+// Special errors
+//
+// - Error Code: EntityTooSmall
+//
+// - Description: Your proposed upload is smaller than the minimum allowed
+// object size. Each part must be at least 5 MB in size, except the last part.
+//
+// - HTTP Status Code: 400 Bad Request
+//
+// - Error Code: InvalidPart
+//
+// - Description: One or more of the specified parts could not be found. The
+// part might not have been uploaded, or the specified ETag might not have matched
+// the uploaded part's ETag.
+//
+// - HTTP Status Code: 400 Bad Request
+//
+// - Error Code: InvalidPartOrder
+//
+// - Description: The list of parts was not in ascending order. The parts list
+// must be specified in order by part number.
+//
+// - HTTP Status Code: 400 Bad Request
+//
+// - Error Code: NoSuchUpload
+//
+// - Description: The specified multipart upload does not exist. The upload ID
+// might be invalid, or the multipart upload might have been aborted or completed.
+//
+// - HTTP Status Code: 404 Not Found
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to CompleteMultipartUpload :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) {
+ if params == nil {
+ params = &CompleteMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CompleteMultipartUpload", params, optFns, c.addOperationCompleteMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CompleteMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
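+
+// An illustrative sketch: completing an upload from collected part ETags.
+// The values are placeholders; in practice each CompletedPart is recorded
+// from the corresponding UploadPart response.
+//
+//	_, err := client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
+//		Bucket:   aws.String("amzn-s3-demo-bucket"),
+//		Key:      aws.String("example-key"),
+//		UploadId: aws.String("example-upload-id"),
+//		MultipartUpload: &types.CompletedMultipartUpload{
+//			Parts: []types.CompletedPart{
+//				{ETag: aws.String(`"etag-of-part-1"`), PartNumber: aws.Int32(1)},
+//			},
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}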
+
+type CompleteMultipartUploadInput struct {
+
+ // Name of the bucket to which the multipart upload was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // ID for the initiated multipart upload.
+ //
+ // This member is required.
+ UploadId *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum
+ // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // This header specifies the checksum type of the object, which determines how
+ // part-level checksums are combined to create an object-level checksum for
+ // multipart objects. You can use this header as a data integrity check to verify
+ // that the checksum type that is received is the same checksum that was specified.
+ // If the checksum type doesn’t match the checksum type that was specified for the
+ // object during the CreateMultipartUpload request, it’ll result in a BadDigest
+ // error. For more information, see Checking object integrity in the Amazon S3 User
+ // Guide.
+ ChecksumType types.ChecksumType
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Uploads the object only if the ETag (entity tag) value provided during the
+ // WRITE operation matches the ETag of the object in S3. If the ETag values do not
+ // match, the operation returns a 412 Precondition Failed error.
+ //
+	// If a conflicting operation occurs during the upload, S3 returns a 409
+	// ConditionalRequestConflict response. On a 409 failure, you should fetch the
+ // object's ETag, re-initiate the multipart upload with CreateMultipartUpload , and
+ // re-upload each part.
+ //
+ // Expects the ETag value as a string.
+ //
+ // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3
+ // User Guide.
+ //
+ // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+ // Uploads the object only if the object key name does not already exist in the
+ // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error.
+ //
+	// If a conflicting operation occurs during the upload, S3 returns a 409
+	// ConditionalRequestConflict response. On a 409 failure, you should re-initiate the
+ // multipart upload with CreateMultipartUpload and re-upload each part.
+ //
+ // Expects the '*' (asterisk) character.
+ //
+ // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3
+ // User Guide.
+ //
+ // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfNoneMatch *string
+
+ // The expected total object size of the multipart upload request. If there’s a
+ // mismatch between the specified object size value and the actual object size
+ // value, it results in an HTTP 400 InvalidRequest error.
+ MpuObjectSize *int64
+
+ // The container for the multipart upload request information.
+ MultipartUpload *types.CompletedMultipartUpload
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The server-side encryption (SSE) algorithm used to encrypt the object. This
+ // parameter is required only when the object was created using a checksum
+ // algorithm or if your bucket policy requires the use of SSE-C. For more
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key
+ SSECustomerAlgorithm *string
+
+ // The server-side encryption (SSE) customer managed key. This parameter is needed
+ // only when the object was created using a checksum algorithm. For more
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKey *string
+
+ // The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ // needed only when the object was created using a checksum algorithm. For more
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
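+
+// An illustrative sketch of the conditional completion described for
+// IfNoneMatch above (placeholder input; client is an assumed *Client).
+//
+//	input := &s3.CompleteMultipartUploadInput{ /* bucket, key, upload ID, parts */ }
+//	input.IfNoneMatch = aws.String("*") // succeed only if the key does not already exist
+//	_, err := client.CompleteMultipartUpload(context.TODO(), input)
+//	// A 412 Precondition Failed or 409 ConditionalRequestConflict response
+//	// means another writer won the race; re-initiate the upload if needed.
+//	_ = err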
+
+func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type CompleteMultipartUploadOutput struct {
+
+ // The name of the bucket that contains the newly created object. Does not return
+ // the access point ARN or access point alias if used.
+ //
+ // Access points are not supported by directory buckets.
+ Bucket *string
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+	// The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is
+	// only present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum
+ // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+	// The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+	// present if the checksum was uploaded with the object. When you use the API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA1 *string
+
+	// The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+	// present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA256 *string
+
+ // The checksum type, which determines how part-level checksums are combined to
+ // create an object-level checksum for multipart objects. You can use this header
+ // as a data integrity check to verify that the checksum type that is received is
+ // the same checksum type that was specified during the CreateMultipartUpload
+ // request. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Entity tag that identifies the newly created object's data. Objects with
+ // different object data will have different entity tags. The entity tag is an
+ // opaque string. The entity tag may or may not be an MD5 digest of the object
+ // data. If the entity tag is not an MD5 digest of the object data, it will contain
+	// one or more nonhexadecimal characters and/or will consist of fewer than 32 or
+ // more than 32 hexadecimal digits. For more information about how the entity tag
+ // is calculated, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ETag *string
+
+ // If the object expiration is configured, this will contain the expiration date (
+ // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ Expiration *string
+
+ // The object key of the newly created object.
+ Key *string
+
+ // The URI that identifies the newly created object.
+ Location *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon S3
+ // (for example, AES256 , aws:kms ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Version ID of the newly created object, in case the bucket has versioning
+ // turned on.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCompleteMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteMultipartUpload"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCompleteMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *CompleteMultipartUploadInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CompleteMultipartUpload",
+ }
+}
+
+// getCompleteMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getCompleteMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CompleteMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCompleteMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCompleteMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
new file mode 100644
index 000000000..2d7d12c3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go
@@ -0,0 +1,1059 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Creates a copy of an object that is already stored in Amazon S3.
+//
+// You can store individual objects of up to 5 TB in Amazon S3. You create a copy
+// of your object up to 5 GB in size in a single atomic action using this API.
+// However, to copy an object greater than 5 GB, you must use the multipart upload
+// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API].
+//
+// You can copy individual objects between general purpose buckets, between
+// directory buckets, and between general purpose buckets and directory buckets.
+//
+// - Amazon S3 supports copy operations using Multi-Region Access Points only as
+// a destination when using the Multi-Region Access Point ARN.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// - VPC endpoints don't support cross-Region requests (including copies). If
+// you're using VPC endpoints, your source and destination buckets should be in the
+// same Amazon Web Services Region as your VPC endpoint.
+//
+// Both the Region that you want to copy the object from and the Region that you
+// want to copy the object to must be enabled for your account. For more
+// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon
+// Web Services Account Management Guide.
+//
+// Amazon S3 transfer acceleration does not support cross-Region copies. If you
+// request a cross-Region copy using a transfer acceleration endpoint, you get a
+// 400 Bad Request error. For more information, see [Transfer Acceleration].
+//
+// Authentication and authorization All CopyObject requests must be authenticated
+// and signed by using IAM credentials (access key ID and secret access key for the
+// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source
+// , must be signed. For more information, see [REST Authentication].
+//
+// Directory buckets - You must use the IAM credentials to authenticate and
+// authorize your access to the CopyObject API operation, instead of using the
+// temporary security credentials through the CreateSession API operation.
+//
+// The Amazon Web Services CLI or SDKs handle authentication and authorization on
+// your behalf.
+//
+// Permissions You must have read access to the source object and write access to
+// the destination bucket.
+//
+// - General purpose bucket permissions - You must have permissions in an IAM
+// policy based on the source and destination bucket types in a CopyObject
+// operation.
+//
+// - If the source object is in a general purpose bucket, you must have
+// s3:GetObject permission to read the source object that is being copied.
+//
+// - If the destination bucket is a general purpose bucket, you must have
+// s3:PutObject permission to write the object copy to the destination bucket.
+//
+// - Directory bucket permissions - You must have permissions in a bucket policy
+// or an IAM identity-based policy based on the source and destination bucket types
+// in a CopyObject operation.
+//
+// - If the source object that you want to copy is in a directory bucket, you
+// must have the s3express:CreateSession permission in the Action element of a
+// policy to read the object. By default, the session is in the ReadWrite mode.
+// If you want to restrict the access, you can explicitly set the
+// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+//
+// - If the copy destination is a directory bucket, you must have the
+// s3express:CreateSession permission in the Action element of a policy to write
+// the object to the destination. The s3express:SessionMode condition key can't
+// be set to ReadOnly on the copy destination bucket.
+//
+// If the object is encrypted with SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide.
+//
+// Response and special errors When the request is an HTTP 1.1 request, the
+// response is chunk encoded. When the request is not an HTTP 1.1 request, the
+// response would not contain the Content-Length . You always need to read the
+// entire response body to check if the copy succeeds.
+//
+// - If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// - A copy request might return an error when Amazon S3 receives the copy
+// request or while Amazon S3 is copying the files. A 200 OK response can contain
+// either a success or an error.
+//
+// - If the error occurs before the copy action starts, you receive a standard
+// Amazon S3 error.
+//
+// - If the error occurs during the copy operation, the error response is
+// embedded in the 200 OK response. For example, in a cross-region copy, you may
+// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3]
+// . The 200 OK status code means the copy was accepted, but it doesn't mean the
+// copy is complete. Another example is when you disconnect from Amazon S3 before
+// the copy is complete, Amazon S3 might cancel the copy and you may receive a
+// 200 OK response. You must stay connected to Amazon S3 until the entire
+// response is successfully received and processed.
+//
+// If you call this API operation directly, make sure to design your application
+//
+// to parse the content of the response and handle it appropriately. If you use
+// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the
+// embedded error and apply error handling per your configuration settings
+// (including automatically retrying the request as appropriate). If the condition
+// persists, the SDKs throw an exception (or, for the SDKs that don't use
+// exceptions, they return an error).
+//
+// Charge The copy request charge is based on the storage class and Region that
+// you specify for the destination object. The request can also result in a data
+// retrieval charge for the source if the source storage class bills for data
+// retrieval. If the copy source is in a different region, the data transfer is
+// billed to the copy source account. For pricing information, see [Amazon S3 pricing].
+//
+// HTTP Host header syntax
+//
+// - Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// - Amazon S3 on Outposts - When you use this action with S3 on Outposts
+// through the REST API, you must direct requests to the S3 on Outposts hostname.
+// The S3 on Outposts hostname takes the form
+// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The
+// hostname isn't required when you use the Amazon Web Services CLI or SDKs.
+//
+// The following operations are related to CopyObject :
+//
+// [PutObject]
+//
+// [GetObject]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror
+// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
+// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone
+// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/
+func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) {
+ if params == nil {
+ params = &CopyObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CopyObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CopyObjectInput struct {
+
+ // The name of the destination bucket.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Copying objects across different Amazon Web Services Regions isn't supported
+ // when the source or destination bucket is in Amazon Web Services Local Zones. The
+ // source and destination buckets must have the same parent Amazon Web Services
+ // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code
+ // InvalidRequest .
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must use the
+ // Outpost bucket access point ARN or the access point alias for the destination
+ // bucket.
+ //
+ // You can only copy objects within the same Outpost bucket. Copying objects
+ // across different Amazon Web Services Outposts, between buckets on the same
+ // Outpost, or between Outposts buckets and any other bucket types isn't
+ // supported. For more information about S3 on Outposts, see [What is S3 on Outposts?] in the S3 on Outposts guide. When
+ // you use this action with S3 on Outposts through the REST API, you must direct
+ // requests to the S3 on Outposts hostname, in the format
+ // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The
+ // hostname isn't required when you use the Amazon Web Services CLI or SDKs.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the source object for the copy operation. The source object can be up
+ // to 5 GB. If the source object is an object that was uploaded by using a
+ // multipart upload, the object copy will be a single part object after the source
+ // object is copied to the destination bucket.
+ //
+ // You specify the value of the copy source in one of two formats, depending on
+ // whether you want to access the source object through an [access point]:
+ //
+ // - For objects not accessed through an access point, specify the name of the
+ // source bucket and the key of the source object, separated by a slash (/). For
+ // example, to copy the object reports/january.pdf from the general purpose
+ // bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value
+ // must be URL-encoded. To copy the object reports/january.pdf from the directory
+ // bucket awsexamplebucket--use1-az5--x-s3 , use
+ // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be
+ // URL-encoded.
+ //
+ // - For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the format
+ // arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key> . For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
+ // . The value must be URL encoded.
+ //
+ //   - Amazon S3 supports copy operations using access points only when the source
+ // and destination buckets are in the same Amazon Web Services Region.
+ //
+ // - Access points are not supported by directory buckets.
+ //
+ // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
+ // ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key> . For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012
+ // in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
+ // . The value must be URL-encoded.
+ //
+ // If your source bucket versioning is enabled, the x-amz-copy-source header by
+ // default identifies the current version of an object to copy. If the current
+ // version is a delete marker, Amazon S3 behaves as if the object was deleted. To
+ // copy a different version, use the versionId query parameter. Specifically,
+ // append ?versionId= to the value (for example,
+ // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
+ // ). If you don't specify a version ID, Amazon S3 copies the latest version of the
+ // source object.
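+ //
+ // For example, a version-specific copy could set (sketch; the version ID below
+ // is the hypothetical one from the example above):
+ //
+ //	CopySource: aws.String("awsexamplebucket/reports/january.pdf" +
+ //		"?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"),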
+ //
+ // If you enable versioning on the destination bucket, Amazon S3 generates a
+ // unique version ID for the copied object. This version ID is different from the
+ // version ID of the source object. Amazon S3 returns the version ID of the copied
+ // object in the x-amz-version-id response header in the response.
+ //
+ // If you do not enable versioning or suspend it on the destination bucket, the
+ // version ID that Amazon S3 generates in the x-amz-version-id response header is
+ // always null.
+ //
+ // Directory buckets - S3 Versioning isn't enabled and supported for directory
+ // buckets.
+ //
+ // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ //
+ // This member is required.
+ CopySource *string
+
+ // The key of the destination object.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned access control list (ACL) to apply to the object.
+ //
+ // When you copy an object, the ACL metadata is not preserved and is set to private
+ // by default. Only the owner has full access control. To override the default ACL
+ // setting, specify a new ACL when you generate a copy request. For more
+ // information, see [Using ACLs].
+ //
+ // If the destination bucket that you're copying objects to uses the bucket owner
+ // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect
+ // permissions. Buckets that use this setting only accept PUT requests that don't
+ // specify an ACL or PUT requests that specify bucket owner full control ACLs,
+ // such as the bucket-owner-full-control canned ACL or an equivalent form of this
+ // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs] in the Amazon S3
+ // User Guide.
+ //
+ // - If your destination bucket uses the bucket owner enforced setting for
+ // Object Ownership, all objects written to the bucket by any account will be owned
+ // by the bucket owner.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
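+ //
+ // Sketch (illustrative): granting the bucket owner of the destination full
+ // control over the object copy:
+ //
+ //	ACL: types.ObjectCannedACLBucketOwnerFullControl,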
+ //
+ // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+ // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ ACL types.ObjectCannedACL
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+ // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.
+ //
+ // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
+ // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t
+ // affect bucket-level settings for S3 Bucket Key.
+ //
+ // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide.
+ //
+ // Directory buckets - S3 Bucket Keys aren't supported when you copy SSE-KMS
+ // encrypted objects from general purpose buckets to directory buckets, from
+ // directory buckets to general purpose buckets, or between directory buckets,
+ // through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request
+ // is made for a KMS-encrypted object.
+ //
+ // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ BucketKeyEnabled *bool
+
+ // Specifies the caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Indicates the algorithm that you want Amazon S3 to use to create the checksum
+ // for the object. For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // When you copy an object, if the source object has a checksum, that checksum
+ // value will be copied to the new object by default. If the CopyObject request
+ // does not include this x-amz-checksum-algorithm header, the checksum algorithm
+ // will be copied from the source object to the destination object (if it's present
+ // on the source object). You can optionally specify a different checksum algorithm
+ // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported
+ // values will respond with the HTTP status code 400 Bad Request .
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
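+ //
+ // Sketch (illustrative): requesting a SHA-256 checksum for the object copy:
+ //
+ //	ChecksumAlgorithm: types.ChecksumAlgorithmSha256,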
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // Specifies presentational information for the object. Indicates whether an
+ // object should be displayed in a web browser or downloaded as a file. It allows
+ // specifying the desired filename for the downloaded file.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ //
+ // For directory buckets, only the aws-chunked value is supported in this header
+ // field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // A standard MIME type that describes the format of the object data.
+ ContentType *string
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request and evaluate as follows, Amazon S3 returns
+ // 200 OK and copies the data:
+ //
+ // - x-amz-copy-source-if-match condition evaluates to true
+ //
+ // - x-amz-copy-source-if-unmodified-since condition evaluates to false
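+ //
+ // Sketch (the ETag value is hypothetical): copy only when the source is
+ // unchanged:
+ //
+ //	CopySourceIfMatch: aws.String("\"33a64df551425fcc55e4d42a148795d9f25f89d4\""),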
+ CopySourceIfMatch *string
+
+ // Copies the object if it has been modified since the specified time.
+ //
+ // If both the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request and
+ // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
+ // code:
+ //
+ // - x-amz-copy-source-if-none-match condition evaluates to false
+ //
+ // - x-amz-copy-source-if-modified-since condition evaluates to true
+ CopySourceIfModifiedSince *time.Time
+
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ //
+ // If both the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request and
+ // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
+ // code:
+ //
+ // - x-amz-copy-source-if-none-match condition evaluates to false
+ //
+ // - x-amz-copy-source-if-modified-since condition evaluates to true
+ CopySourceIfNoneMatch *string
+
+ // Copies the object if it hasn't been modified since the specified time.
+ //
+ // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request and evaluate as follows, Amazon S3 returns
+ // 200 OK and copies the data:
+ //
+ // - x-amz-copy-source-if-match condition evaluates to true
+ //
+ // - x-amz-copy-source-if-unmodified-since condition evaluates to false
+ CopySourceIfUnmodifiedSince *time.Time
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256 ).
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you must
+ // provide the necessary encryption information in your request so that Amazon S3
+ // can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be the same
+ // one that was used when the source object was created.
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you must
+ // provide the necessary encryption information in your request so that Amazon S3
+ // can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you must
+ // provide the necessary encryption information in your request so that Amazon S3
+ // can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account ID of the expected destination bucket owner. If the account ID that
+ // you provide does not match the actual owner of the destination bucket, the
+ // request fails with the HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The account ID of the expected source bucket owner. If the account ID that you
+ // provide does not match the actual owner of the source bucket, the request fails
+ // with the HTTP status code 403 Forbidden (access denied).
+ ExpectedSourceBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the object ACL.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata that's provided in the request. When copying an object, you can
+ // preserve all metadata (the default) or specify new metadata. If this header
+ // isn’t specified, COPY is the default behavior.
+ //
+ // General purpose bucket - For general purpose buckets, when you grant
+ // permissions, you can use the s3:x-amz-metadata-directive condition key to
+ // enforce certain metadata behavior when objects are uploaded. For more
+ // information, see [Amazon S3 condition key examples] in the Amazon S3 User Guide.
+ //
+ // x-amz-website-redirect-location is unique to each object and is not copied when
+ // using the x-amz-metadata-directive header. To copy the value, you must specify
+ // x-amz-website-redirect-location in the request header.
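+ //
+ // Sketch (illustrative metadata): replacing rather than copying the metadata:
+ //
+ //	MetadataDirective: types.MetadataDirectiveReplace,
+ //	Metadata:          map[string]string{"origin": "copy-example"},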
+ //
+ // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html
+ MetadataDirective types.MetadataDirective
+
+ // Specifies whether you want to apply a legal hold to the object copy.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode that you want to apply to the object copy.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when you want the Object Lock of the object copy to expire.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256 ).
+ //
+ // When you perform a CopyObject operation, if you want to use a different type of
+ // encryption setting for the target object, you can specify appropriate
+ // encryption-related headers to encrypt the target object with an Amazon S3
+ // managed key, a KMS key, or a customer-provided key. If the encryption setting in
+ // your request is different from the default encryption configuration of the
+ // destination bucket, the encryption setting in your request takes precedence.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded. Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the Amazon Web Services KMS Encryption Context as an additional
+ // encryption context to use for the destination object encryption. The value of
+ // this header is a base64-encoded UTF-8 string holding JSON with the encryption
+ // context key-value pairs.
+ //
+ // General purpose buckets - This value must be explicitly added to specify
+ // encryption context for CopyObject requests if you want an additional encryption
+ // context for your destination object. The additional encryption context of the
+ // source object won't be copied to the destination object. For more information,
+ // see [Encryption context] in the Amazon S3 User Guide.
+ //
+ // Directory buckets - You can optionally provide an explicit encryption context
+ // value. The value must match the default encryption context - the bucket Amazon
+ // Resource Name (ARN). An additional encryption context value is not supported.
+ //
+ // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context
+ SSEKMSEncryptionContext *string
+
+ // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object
+ // encryption. All GET and PUT requests for an object protected by KMS will fail if
+ // they're not made via SSL or using SigV4. For information about configuring any
+ // of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ // CLI, see [Specifying the Signature Version in Request Authentication] in the Amazon S3 User Guide.
+ //
+ // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify
+ // the x-amz-server-side-encryption header to aws:kms . Then, the
+ // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's
+ // default KMS customer managed key ID. If you want to explicitly set the
+ // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's
+ // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS
+ // configuration can only support one [customer managed key] per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3
+ // ) isn't supported.
+ //
+ // Incorrect key specification results in an HTTP 400 Bad Request error.
+ //
+ // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing this object in Amazon
+ // S3. Requests with unrecognized or unsupported values won’t write a destination
+ // object and will receive a 400 Bad Request response.
+ //
+ // Amazon S3 automatically encrypts all new objects that are copied to an S3
+ // bucket. When copying an object, if you don't specify encryption information in
+ // your copy request, the encryption setting of the target object is set to the
+ // default encryption configuration of the destination bucket. By default, all
+ // buckets have a base level of encryption configuration that uses server-side
+ // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a
+ // different default encryption configuration, Amazon S3 uses the corresponding
+ // encryption key to encrypt the target object copy.
+ //
+ // With server-side encryption, Amazon S3 encrypts your data as it writes your
+ // data to disks in its data centers and decrypts the data when you access it. For
+ // more information about server-side encryption, see [Using Server-Side Encryption] in the Amazon S3 User Guide.
+ //
+ // General purpose buckets
+ //
+ // - For general purpose buckets, there are the following supported options for
+ // server-side encryption: server-side encryption with Key Management Service (KMS)
+ // keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS
+ // keys (DSSE-KMS), and server-side encryption with customer-provided encryption
+ // keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided
+ // key to encrypt the target object copy.
+ //
+ // - When you perform a CopyObject operation, if you want to use a different type
+ // of encryption setting for the target object, you can specify appropriate
+ // encryption-related headers to encrypt the target object with an Amazon S3
+ // managed key, a KMS key, or a customer-provided key. If the encryption setting in
+ // your request is different from the default encryption configuration of the
+ // destination bucket, the encryption setting in your request takes precedence.
+ //
+ // Directory buckets
+ //
+ // - For directory buckets, there are only two supported options for server-side
+ // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (
+ // AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We
+ // recommend that the bucket's default encryption uses the desired encryption
+ // configuration and you don't override the bucket default encryption in your
+ // CreateSession requests or PUT object requests. Then, new objects are
+ // automatically encrypted with the desired encryption settings. For more
+ // information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide. For more information about the
+ // encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+ //
+ // - To encrypt new object copies to a directory bucket with SSE-KMS, we
+ // recommend you specify SSE-KMS as the directory bucket's default encryption
+ //   configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key] ( aws/s3 ) isn't
+ //   supported. Your SSE-KMS configuration can only support one [customer managed key] per directory bucket
+ // for the lifetime of the bucket. After you specify a customer managed key for
+ // SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS
+ // configuration. Then, when you perform a CopyObject operation and want to
+ // specify server-side encryption settings for new object copies with SSE-KMS in
+ // the encryption-related request headers, you must ensure the encryption key is
+ // the same customer managed key that you specified for the directory bucket's
+ // default encryption configuration.
+ //
+ // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+ // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+ // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+ // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
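+ //
+ // Sketch (the key ARN is hypothetical): requesting SSE-KMS for the object copy:
+ //
+ //	ServerSideEncryption: types.ServerSideEncryptionAwsKms,
+ //	SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/hypothetical-key-id"),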
+ ServerSideEncryption types.ServerSideEncryption
+
+ // If the x-amz-storage-class header is not used, the copied object will be stored
+ // in the STANDARD Storage Class by default. The STANDARD storage class provides
+ // high durability and high availability. Depending on performance needs, you can
+ // specify a different Storage Class.
+ //
+ // - Directory buckets - For directory buckets, only the S3 Express One Zone
+ //   storage class is supported to store newly created objects. Requests with
+ //   unsupported storage class values don't write a destination object and
+ //   receive the HTTP status code 400 Bad Request .
+ //
+ // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class.
+ //
+ // You can use the CopyObject action to change the storage class of an object that
+ // is already stored in Amazon S3 by using the x-amz-storage-class header. For
+ // more information, see [Storage Classes] in the Amazon S3 User Guide.
+ //
+ // Before using an object as a source object for the copy operation, you must
+ // restore a copy of it if it meets any of the following conditions:
+ //
+ // - The storage class of the source object is GLACIER or DEEP_ARCHIVE .
+ //
+ //   - The storage class of the source object is INTELLIGENT_TIERING and its [S3 Intelligent-Tiering access tier] is
+ //   Archive Access or Deep Archive Access .
+ //
+ // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide.
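+ //
+ // Sketch (illustrative): a common pattern is copying an object onto itself
+ // with a new storage class:
+ //
+ //	StorageClass: types.StorageClassStandardIa,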
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+ // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
+ // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition
+ StorageClass types.StorageClass
+
+ // The tag-set for the object copy in the destination bucket. This value must be
+ // used in conjunction with the x-amz-tagging-directive if you choose REPLACE for
+ // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive
+ // , you don't need to set the x-amz-tagging header, because the tag-set will be
+ // copied from the source object directly. The tag-set must be encoded as URL Query
+ // parameters.
+ //
+ // The default value is the empty value.
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only the
+ // empty tag-set is supported. Any requests that attempt to write non-empty tags
+ // into directory buckets will receive a 501 Not Implemented status code. When the
+ // destination bucket is a directory bucket, you will receive a 501 Not Implemented
+ // response in any of the following situations:
+ //
+ // - When you attempt to COPY the tag-set from an S3 source object that has
+ // non-empty tags.
+ //
+ // - When you attempt to REPLACE the tag-set of a source object and set a
+ // non-empty value to x-amz-tagging .
+ //
+ // - When you don't set the x-amz-tagging-directive header and the source object
+ // has non-empty tags. This is because the default value of
+ // x-amz-tagging-directive is COPY .
+ //
+ // Because only the empty tag-set is supported for directory buckets in a
+ // CopyObject operation, the following situations are allowed:
+ //
+ // - When you attempt to COPY the tag-set from a directory bucket source object
+ // that has no tags to a general purpose bucket. It copies an empty tag-set to the
+ // destination object.
+ //
+ // - When you attempt to REPLACE the tag-set of a directory bucket source object
+ // and set the x-amz-tagging value of the directory bucket destination object to
+ // empty.
+ //
+ // - When you attempt to REPLACE the tag-set of a general purpose bucket source
+ // object that has non-empty tags and set the x-amz-tagging value of the
+ // directory bucket destination object to empty.
+ //
+ // - When you attempt to REPLACE the tag-set of a directory bucket source object
+ // and don't set the x-amz-tagging value of the directory bucket destination
+ // object. This is because the default value of x-amz-tagging is the empty value.
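+ //
+ // Sketch (illustrative tags): replacing the tag-set with URL-query-encoded tags:
+ //
+ //	TaggingDirective: types.TaggingDirectiveReplace,
+ //	Tagging:          aws.String("env=prod&team=storage"),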
+ Tagging *string
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set that's provided in the request.
+ //
+ // The default value is COPY .
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only the
+ // empty tag-set is supported. Any requests that attempt to write non-empty tags
+ // into directory buckets will receive a 501 Not Implemented status code. When the
+ // destination bucket is a directory bucket, you will receive a 501 Not Implemented
+ // response in any of the following situations:
+ //
+ // - When you attempt to COPY the tag-set from an S3 source object that has
+ // non-empty tags.
+ //
+ // - When you attempt to REPLACE the tag-set of a source object and set a
+ // non-empty value to x-amz-tagging .
+ //
+ // - When you don't set the x-amz-tagging-directive header and the source object
+ // has non-empty tags. This is because the default value of
+ // x-amz-tagging-directive is COPY .
+ //
+ // Because only the empty tag-set is supported for directory buckets in a
+ // CopyObject operation, the following situations are allowed:
+ //
+ // - When you attempt to COPY the tag-set from a directory bucket source object
+ // that has no tags to a general purpose bucket. It copies an empty tag-set to the
+ // destination object.
+ //
+ // - When you attempt to REPLACE the tag-set of a directory bucket source object
+ // and set the x-amz-tagging value of the directory bucket destination object to
+ // empty.
+ //
+ // - When you attempt to REPLACE the tag-set of a general purpose bucket source
+ // object that has non-empty tags and set the x-amz-tagging value of the
+ // directory bucket destination object to empty.
+ //
+ // - When you attempt to REPLACE the tag-set of a directory bucket source object
+ // and don't set the x-amz-tagging value of the directory bucket destination
+ // object. This is because the default value of x-amz-tagging is the empty value.
+ TaggingDirective types.TaggingDirective
+
+ // If the destination bucket is configured as a website, redirects requests for
+ // this object copy to another object in the same bucket or to an external URL.
+ // Amazon S3 stores the value of this header in the object metadata. This value is
+ // unique to each object and is not copied when using the x-amz-metadata-directive
+ // header. Instead, you may opt to provide this header in combination with the
+ // x-amz-metadata-directive header.
+ //
+ // This functionality is not supported for directory buckets.
+ WebsiteRedirectLocation *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.CopySource = in.CopySource
+ p.Key = in.Key
+ p.DisableS3ExpressSessionAuth = ptr.Bool(true)
+}
+
+type CopyObjectOutput struct {
+
+ // Indicates whether the copied object uses an S3 Bucket Key for server-side
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // Container for all response elements.
+ CopyObjectResult *types.CopyObjectResult
+
+ // Version ID of the source object that was copied.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceVersionId *string
+
+ // If the object expiration is configured, the response includes this header.
+ //
+ // Object expiration information is not returned in directory buckets and this
+ // header returns the value " NotImplemented " in all responses for directory
+ // buckets.
+ Expiration *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use for
+ // object encryption. The value of this header is a Base64 encoded UTF-8 string
+ // holding JSON with the encryption context key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Version ID of the newly created copy.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CopyObject"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCopyObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addCopyObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *CopyObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CopyObject",
+ }
+}
+
+// getCopyObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getCopyObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CopyObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCopyObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
new file mode 100644
index 000000000..5dfadb4f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go
@@ -0,0 +1,417 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts
+// bucket, see [CreateBucket].
+//
+// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have
+// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous
+// requests are never allowed to create buckets. By creating the bucket, you become
+// the bucket owner.
+//
+// There are two types of buckets: general purpose buckets and directory buckets.
+// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets] in the Amazon S3 User Guide.
+//
+// - General purpose buckets - If you send your CreateBucket request to the
+// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So
+// the signature calculations in Signature Version 4 must use us-east-1 as the
+// Region, even if the location constraint in the request specifies another Region
+// where the bucket is to be created. If you create a bucket in a Region other than
+// US East (N. Virginia), your application must be able to handle a 307 redirect.
+// For more information, see [Virtual hosting of buckets] in the Amazon S3 User Guide.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format
+// https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+//   endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+//   information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - In addition to the s3:CreateBucket
+// permission, the following permissions are required in a policy when your
+// CreateBucket request includes specific headers:
+//
+// - Access control lists (ACLs) - In your CreateBucket request, if you specify
+// an access control list (ACL) and set it to public-read , public-read-write ,
+// authenticated-read , or if you explicitly specify any other custom ACLs, both
+// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your
+// CreateBucket request, if you set the ACL to private , or if you don't specify
+// any ACLs, only the s3:CreateBucket permission is required.
+//
+// - Object Lock - In your CreateBucket request, if you set
+// x-amz-bucket-object-lock-enabled to true, the
+// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are
+// required.
+//
+// - S3 Object Ownership - If your CreateBucket request includes the
+// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls
+// permission is required.
+//
+// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly
+// set S3 Object Ownership for the bucket to a different value than the default,
+// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public
+// access, you must first create the bucket (without the bucket ACL) and then
+// explicitly disable Block Public Access on the bucket before using PutBucketAcl
+// to set the ACL. If you try to create a bucket with a public ACL, the request
+// will fail.
+//
+// For the majority of modern use cases in S3, we recommend that you keep all
+// Block Public Access settings enabled and keep ACLs disabled. If you would like
+// to share data with users outside of your account, you can use bucket policies as
+// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket] and [Blocking public access to your Amazon S3 storage] in the Amazon S3 User Guide.
+//
+// - S3 Block Public Access - If your specific use case requires granting public
+// access to your S3 resources, you can disable Block Public Access. Specifically,
+// you can create a new bucket with Block Public Access enabled, then separately
+//   call the [DeletePublicAccessBlock] API. To use this operation, you must have the
+// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block
+//   Public Access, see [Blocking public access to your Amazon S3 storage] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have the s3express:CreateBucket
+// permission in an IAM identity-based policy instead of a bucket policy.
+// Cross-account access to this API operation isn't supported. This operation can
+// only be performed by the Amazon Web Services account that owns the resource. For
+//   more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the
+// Amazon S3 User Guide.
+//
+// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public
+// Access are not supported for directory buckets. For directory buckets, all Block
+// Public Access settings are enabled at the bucket level and S3 Object Ownership
+// is set to Bucket owner enforced (ACLs disabled). These settings can't be
+// modified.
+//
+// For more information about permissions for creating and working with directory
+// buckets, see [Directory buckets] in the Amazon S3 User Guide. For more information about
+// supported S3 features for directory buckets, see [Features of S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following operations are related to CreateBucket :
+//
+// [PutObject]
+//
+// [DeleteBucket]
+//
+// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html
+// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html
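+//
+// A minimal usage sketch (illustrative only; the configured client, the ctx,
+// the bucket name, and the Region are assumptions):
+//
+//	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		CreateBucketConfiguration: &types.CreateBucketConfiguration{
+//			LocationConstraint: types.BucketLocationConstraintUsWest2,
+//		},
+//	})
+//
+// Buckets in us-east-1 are typically created without a CreateBucketConfiguration.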
+func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) {
+ if params == nil {
+ params = &CreateBucketInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateBucket", params, optFns, c.addOperationCreateBucketMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateBucketOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateBucketInput struct {
+
+ // The name of the bucket to create.
+ //
+ // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules]
+ // in the Amazon S3 User Guide.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The canned ACL to apply to the bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ ACL types.BucketCannedACL
+
+ // The configuration information for the bucket.
+ CreateBucketConfiguration *types.CreateBucketConfiguration
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ GrantFullControl *string
+
+ // Allows grantee to list the objects in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ GrantRead *string
+
+ // Allows grantee to read the bucket ACL.
+ //
+ // This functionality is not supported for directory buckets.
+ GrantReadACP *string
+
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
+ //
+ // This functionality is not supported for directory buckets.
+ GrantWrite *string
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ GrantWriteACP *string
+
+ // Specifies whether you want S3 Object Lock to be enabled for the new bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockEnabledForBucket *bool
+
+ // The container element for object ownership for a bucket's ownership controls.
+ //
+ // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
+ // bucket owner if the objects are uploaded with the bucket-owner-full-control
+ // canned ACL.
+ //
+ // ObjectWriter - The uploading account will own the object if the object is
+ // uploaded with the bucket-owner-full-control canned ACL.
+ //
+ // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
+ // affect permissions. The bucket owner automatically owns and has full control
+ // over every object in the bucket. The bucket only accepts PUT requests that don't
+ // specify an ACL or specify bucket owner full control ACLs (such as the predefined
+ // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants
+ // the same permissions).
+ //
+ // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are
+ // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where
+ // you must control access for each object individually. For more information about
+ // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Directory buckets
+ // use the bucket owner enforced setting for S3 Object Ownership.
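+ //
+ // Sketch (illustrative): stating the ACL-disabled default explicitly:
+ //
+ //	ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,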
+ //
+ // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ ObjectOwnership types.ObjectOwnership
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+ p.DisableAccessPoints = ptr.Bool(true)
+}
+
+type CreateBucketOutput struct {
+
+ // A forward slash followed by the name of the bucket.
+ Location *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucket"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateBucketValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucket(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addCreateBucketUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *CreateBucketInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateBucket",
+ }
+}
+
+// getCreateBucketBucketMember returns a pointer to a string denoting the
+// provided bucket member value and a boolean indicating whether the input has a
+// modeled bucket name.
+func getCreateBucketBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateBucketInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateBucketBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
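For reference, a minimal sketch of driving the CreateBucket operation above, assuming default credentials and treating the bucket name as a placeholder; it leaves ObjectOwnership at the documented BucketOwnerEnforced default, passed explicitly here for illustration:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// With BucketOwnerEnforced (the default), ACLs are disabled and the
	// bucket owner owns every object in the bucket.
	out, err := client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
		Bucket:          aws.String("amzn-s3-demo-bucket"), // placeholder name
		ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created bucket at %s", aws.ToString(out.Location))
}
```

Note that outside us-east-1 the input also needs a CreateBucketConfiguration carrying the target LocationConstraint.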
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go
new file mode 100644
index 000000000..fbac4458f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go
@@ -0,0 +1,293 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a metadata table configuration for a general purpose bucket. For more
+// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide.
+//
+// Permissions To use this operation, you must have the following permissions. For
+// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide.
+//
+// - s3:CreateBucketMetadataTableConfiguration
+//
+// - s3tables:CreateNamespace
+//
+// - s3tables:GetTable
+//
+// - s3tables:CreateTable
+//
+// - s3tables:PutTablePolicy
+//
+// If you also want to integrate your table bucket with Amazon Web Services
+// analytics services so that you can query your metadata table, you need
+// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide.
+//
+// The following operations are related to CreateBucketMetadataTableConfiguration :
+//
+// [DeleteBucketMetadataTableConfiguration]
+//
+// [GetBucketMetadataTableConfiguration]
+//
+// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html
+// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html
+// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html
+// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html
+// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html
+func (c *Client) CreateBucketMetadataTableConfiguration(ctx context.Context, params *CreateBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataTableConfigurationOutput, error) {
+ if params == nil {
+ params = &CreateBucketMetadataTableConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataTableConfiguration", params, optFns, c.addOperationCreateBucketMetadataTableConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateBucketMetadataTableConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateBucketMetadataTableConfigurationInput struct {
+
+ // The general purpose bucket that you want to create the metadata table
+ // configuration in.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The contents of your metadata table configuration.
+ //
+ // This member is required.
+ MetadataTableConfiguration *types.MetadataTableConfiguration
+
+ // The checksum algorithm to use with your metadata table configuration.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Content-MD5 header for the metadata table configuration.
+ ContentMD5 *string
+
+ // The expected owner of the general purpose bucket that contains your metadata
+ // table configuration.
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type CreateBucketMetadataTableConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataTableConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataTableConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *CreateBucketMetadataTableConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateBucketMetadataTableConfiguration",
+ }
+}
+
+// getCreateBucketMetadataTableConfigurationRequestAlgorithmMember gets the
+// request checksum algorithm value provided as input.
+func getCreateBucketMetadataTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*CreateBucketMetadataTableConfigurationInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getCreateBucketMetadataTableConfigurationRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getCreateBucketMetadataTableConfigurationBucketMember returns a pointer to a
+// string denoting the provided bucket member value and a boolean indicating
+// whether the input has a modeled bucket name.
+func getCreateBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateBucketMetadataTableConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateBucketMetadataTableConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
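Both required members of the input above can be wired together as in this sketch; the context and client are assumed to already exist, and the table bucket ARN and table name are placeholders:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createMetadataTable creates a metadata table configuration for a general
// purpose bucket. The destination values below are illustrative only.
func createMetadataTable(ctx context.Context, client *s3.Client) error {
	_, err := client.CreateBucketMetadataTableConfiguration(ctx,
		&s3.CreateBucketMetadataTableConfigurationInput{
			Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder bucket
			MetadataTableConfiguration: &types.MetadataTableConfiguration{
				S3TablesDestination: &types.S3TablesDestination{
					// Placeholder table bucket ARN and table name.
					TableBucketArn: aws.String("arn:aws:s3tables:us-west-2:111122223333:bucket/example-table-bucket"),
					TableName:      aws.String("example-metadata-table"),
				},
			},
		})
	return err
}
```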
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
new file mode 100644
index 000000000..8b272620a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go
@@ -0,0 +1,999 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// This action initiates a multipart upload and returns an upload ID. This upload
+// ID is used to associate all of the parts in the specific multipart upload. You
+// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]).
+// You also include this upload ID in the final request to either complete or abort
+// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview]
+// in the Amazon S3 User Guide.
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or abort
+// the multipart upload. Amazon S3 frees up the space used to store the parts and
+// stops charging you for storing them only after you either complete or abort a
+// multipart upload.
+//
+// If you have configured a lifecycle rule to abort incomplete multipart uploads,
+// the created multipart upload must be completed within the number of days
+// specified in the bucket lifecycle configuration. Otherwise, the incomplete
+// multipart upload becomes eligible for an abort action and Amazon S3 aborts the
+// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration].
+//
+// - Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Request signing For request signing, multipart upload is just a series of
+// regular requests. You initiate a multipart upload, send one or more requests to
+// upload parts, and then complete the multipart upload process. You sign each
+// request individually. There is nothing special about signing multipart upload
+// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To perform a multipart upload with
+// encryption using a Key Management Service (KMS) key, the requester must
+// have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key.
+// The requester must also have permissions for the kms:GenerateDataKey action
+// for the CreateMultipartUpload API. Then, the requester needs permissions for
+// the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These
+// permissions are required because Amazon S3 must decrypt and read data from the
+// encrypted file parts before it completes the multipart upload. For more
+// information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create the session and
+// refresh the session token automatically to avoid service interruptions when a
+// session expires. For more information about authorization, see [CreateSession].
+//
+// Encryption
+//
+// - General purpose buckets - Server-side encryption is for data encryption at
+// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. Amazon S3 automatically encrypts all new
+// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you
+// don't specify encryption information in your request, the encryption setting of
+// the uploaded parts is set to the default encryption configuration of the
+// destination bucket. By default, all buckets have a base level of encryption
+// configuration that uses server-side encryption with Amazon S3 managed keys
+// (SSE-S3). If the destination bucket has a default encryption configuration that
+// uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS)
+// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding
+// KMS key or customer-provided key to encrypt the uploaded parts. When you
+// perform a CreateMultipartUpload operation, if you want to use a different type
+// of encryption setting for the uploaded parts, you can request that Amazon S3
+// encrypts the object with a different encryption key (such as an Amazon S3
+// managed key, a KMS key, or a customer-provided key). When the encryption setting
+// in your request is different from the default encryption configuration of the
+// destination bucket, the encryption setting in your request takes precedence. If
+// you choose to provide your own encryption key, the request headers you provide
+// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload
+// request.
+//
+// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (
+// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS)
+// – If you want Amazon Web Services to manage the keys used to encrypt data,
+// specify the following headers in the request.
+//
+// - x-amz-server-side-encryption
+//
+// - x-amz-server-side-encryption-aws-kms-key-id
+//
+// - x-amz-server-side-encryption-context
+//
+// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide
+// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
+// Services managed key ( aws/s3 key) in KMS to protect the data.
+//
+// - To perform a multipart upload with encryption by using an Amazon Web
+// Services KMS key, the requester must have permission to the kms:Decrypt and
+// kms:GenerateDataKey* actions on the key. These permissions are required
+// because Amazon S3 must decrypt and read data from the encrypted file parts
+// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in
+// the Amazon S3 User Guide.
+//
+// - If your Identity and Access Management (IAM) user or role is in the same
+// Amazon Web Services account as the KMS key, then you must have these permissions
+// on the key policy. If your IAM user or role is in a different account from the
+// key, then you must have the permissions on both the key policy and your IAM user
+// or role.
+//
+// - All GET and PUT requests for an object protected by KMS fail if you don't
+// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS),
+// or Signature Version 4. For information about configuring any of the officially
+// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the
+// Amazon S3 User Guide.
+//
+// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys]
+//
+// in the Amazon S3 User Guide.
+//
+// - Use customer-provided encryption keys (SSE-C) – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+//
+// - x-amz-server-side-encryption-customer-algorithm
+//
+// - x-amz-server-side-encryption-customer-key
+//
+// - x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about server-side encryption with customer-provided
+//
+// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide.
+//
+// - Directory buckets - For directory buckets, there are only two supported
+// options for server-side encryption: server-side encryption with Amazon S3
+// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses
+// the desired encryption configuration and you don't override the bucket default
+// encryption in your CreateSession requests or PUT object requests. Then, new
+// objects are automatically encrypted with the desired encryption settings. For
+// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about
+// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+//
+// In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the
+//
+// encryption request headers must match the encryption settings that are specified
+// in the CreateSession request. You can't override the values of the encryption
+// settings ( x-amz-server-side-encryption ,
+// x-amz-server-side-encryption-aws-kms-key-id ,
+// x-amz-server-side-encryption-context , and
+// x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the
+// CreateSession request. You don't need to explicitly specify these encryption
+// settings values in Zonal endpoint API calls, and Amazon S3 will use the
+// encryption settings values from the CreateSession request to protect new
+// objects in the directory bucket.
+//
+// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the
+//
+// session token refreshes automatically to avoid service interruptions when a
+// session expires. The CLI or the Amazon Web Services SDKs use the bucket's
+// default encryption configuration for the CreateSession request. It's not
+// supported to override the encryption settings values in the CreateSession
+// request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption
+// request headers must match the default encryption configuration of the directory
+// bucket.
+//
+// For directory buckets, when you perform a CreateMultipartUpload operation and an
+//
+// UploadPartCopy operation, the request headers you provide in the
+// CreateMultipartUpload request must match the default encryption configuration
+// of the destination bucket.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to CreateMultipartUpload :
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
+// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) {
+ if params == nil {
+ params = &CreateMultipartUploadInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateMultipartUpload", params, optFns, c.addOperationCreateMultipartUploadMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateMultipartUploadOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
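The lifecycle described in the doc comment above (initiate, upload parts, then complete or abort so the stored parts stop accruing charges) looks roughly like this single-part sketch, with placeholder bucket and key names and default credentials assumed:

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	bucket, key := "amzn-s3-demo-bucket", "example-object" // placeholders

	// Initiate the upload; the returned UploadId must accompany every
	// subsequent part request and the final complete or abort call.
	mpu, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Upload one part. Every part except the last must be at least 5 MiB.
	part, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int32(1),
		Body:       bytes.NewReader(make([]byte, 5*1024*1024)),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Complete the upload by listing the parts with their returned ETags.
	_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int32(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```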
+
+type CreateMultipartUploadInput struct {
+
+ // The name of the bucket where the multipart upload is initiated and where the
+ // object is uploaded.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload is to be initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. Amazon S3 supports a set of predefined
+ // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and
+ // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can grant access permissions to individual Amazon
+ // Web Services accounts or to predefined groups defined by Amazon S3. These
+ // permissions are then added to the access control list (ACL) on the new object.
+ // For more information, see [Using ACLs]. One way to grant the permissions using the request
+ // headers is to specify a canned ACL with the x-amz-acl request header.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+ ACL types.ObjectCannedACL
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // General purpose buckets - Setting this header to true causes Amazon S3 to use
+ // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this
+ // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key.
+ //
+ // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT
+ // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't
+// supported when you copy SSE-KMS encrypted objects from general purpose buckets
+ // to directory buckets, from directory buckets to general purpose buckets, or
+ // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a
+ // call to KMS every time a copy request is made for a KMS-encrypted object.
+ //
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
+ BucketKeyEnabled *bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // Indicates the algorithm that you want Amazon S3 to use to create the checksum
+ // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // Indicates the checksum type that you want Amazon S3 to use to calculate the
+ // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ //
+ // For directory buckets, only the aws-chunked value is supported in this header
+ // field.
+ ContentEncoding *string
+
+ // The language that the content is in.
+ ContentLanguage *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // Specify access permissions explicitly to give the grantee READ, READ_ACP, and
+ // WRITE_ACP permissions on the object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ //
+ // - uri – if you are granting permissions to a predefined group
+ //
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
+ // - US East (N. Virginia)
+ //
+ // - US West (N. California)
+ //
+ // - US West (Oregon)
+ //
+ // - Asia Pacific (Singapore)
+ //
+ // - Asia Pacific (Sydney)
+ //
+ // - Asia Pacific (Tokyo)
+ //
+ // - Europe (Ireland)
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
+// For example, the following x-amz-grant-full-control header grants the Amazon
+// Web Services accounts identified by account IDs full control over the object:
+//
+// x-amz-grant-full-control: id="11112222333", id="444455556666"
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ GrantFullControl *string
+
+ // Specify access permissions explicitly to allow grantee to read the object data
+ // and its metadata.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ //
+ // - uri – if you are granting permissions to a predefined group
+ //
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
+ // - US East (N. Virginia)
+ //
+ // - US West (N. California)
+ //
+ // - US West (Oregon)
+ //
+ // - Asia Pacific (Singapore)
+ //
+ // - Asia Pacific (Sydney)
+ //
+ // - Asia Pacific (Tokyo)
+ //
+ // - Europe (Ireland)
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data and
+ // its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ GrantRead *string
+
+// Specify access permissions explicitly to allow the grantee to read the object ACL.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ //
+ // - uri – if you are granting permissions to a predefined group
+ //
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
+ // - US East (N. Virginia)
+ //
+ // - US West (N. California)
+ //
+ // - US West (Oregon)
+ //
+ // - Asia Pacific (Singapore)
+ //
+ // - Asia Pacific (Sydney)
+ //
+ // - Asia Pacific (Tokyo)
+ //
+ // - Europe (Ireland)
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
+// For example, the following x-amz-grant-read-acp header grants the Amazon Web
+// Services accounts identified by account IDs permission to read the object ACL:
+//
+// x-amz-grant-read-acp: id="11112222333", id="444455556666"
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ GrantReadACP *string
+
+// Specify access permissions explicitly to allow the grantee to write the ACL
+// for the applicable object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ //
+ // - uri – if you are granting permissions to a predefined group
+ //
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
+ // - US East (N. Virginia)
+ //
+ // - US West (N. California)
+ //
+ // - US West (Oregon)
+ //
+ // - Asia Pacific (Singapore)
+ //
+ // - Asia Pacific (Sydney)
+ //
+ // - Asia Pacific (Tokyo)
+ //
+ // - Europe (Ireland)
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+ // Amazon Web Services General Reference.
+ //
+// For example, the following x-amz-grant-write-acp header grants the Amazon Web
+// Services accounts identified by account IDs permission to write the ACL for
+// the object:
+//
+// x-amz-grant-write-acp: id="11112222333", id="444455556666"
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ GrantWriteACP *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether you want to apply a legal hold to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // Specifies the Object Lock mode that you want to apply to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode types.ObjectLockMode
+
+ // Specifies the date and time when you want the Object Lock to expire.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the customer-provided encryption key
+ // according to RFC 1321. Amazon S3 uses this header for a message integrity check
+ // to ensure that the encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the Amazon Web Services KMS Encryption Context to use for object
+ // encryption. The value of this header is a Base64 encoded string of a UTF-8
+ // encoded JSON, which contains the encryption context as key-value pairs.
+ //
+ // Directory buckets - You can optionally provide an explicit encryption context
+ // value. The value must match the default encryption context - the bucket Amazon
+ // Resource Name (ARN). An additional encryption context value is not supported.
+ SSEKMSEncryptionContext *string
+
+ // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object
+ // encryption. If the KMS key doesn't exist in the same account that's issuing the
+// command, you must use the full Key ARN, not the Key ID.
+ //
+ // General purpose buckets - If you specify x-amz-server-side-encryption with
+ // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key
+ // Alias) of the KMS key to use. If you specify
+ // x-amz-server-side-encryption:aws:kms or
+ // x-amz-server-side-encryption:aws:kms:dsse , but do not provide
+ // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
+ // Services managed key ( aws/s3 ) to protect the data.
+ //
+// Directory buckets - To encrypt data using SSE-KMS, we recommend setting
+ // the x-amz-server-side-encryption header to aws:kms . Then, the
+ // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's
+ // default KMS customer managed key ID. If you want to explicitly set the
+ // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's
+ // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS
+// configuration can only support one [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3
+ // ) isn't supported.
+ //
+ // Incorrect key specification results in an HTTP 400 Bad Request error.
+ //
+ // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256 , aws:kms ).
+ //
+ // - Directory buckets - For directory buckets, there are only two supported
+ // options for server-side encryption: server-side encryption with Amazon S3
+ // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+ // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses
+ // the desired encryption configuration and you don't override the bucket default
+ // encryption in your CreateSession requests or PUT object requests. Then, new
+ // objects are automatically encrypted with the desired encryption settings. For
+ // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about
+ // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+ //
+ // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the
+ // encryption request headers must match the encryption settings that are specified
+ // in the CreateSession request. You can't override the values of the encryption
+ // settings ( x-amz-server-side-encryption ,
+ // x-amz-server-side-encryption-aws-kms-key-id ,
+ // x-amz-server-side-encryption-context , and
+ // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the
+ // CreateSession request. You don't need to explicitly specify these encryption
+ // settings values in Zonal endpoint API calls, and Amazon S3 will use the
+ // encryption settings values from the CreateSession request to protect new
+ // objects in the directory bucket.
+ //
+ // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the
+ // session token refreshes automatically to avoid service interruptions when a
+ // session expires. The CLI or the Amazon Web Services SDKs use the bucket's
+ // default encryption configuration for the CreateSession request. It's not
+ // supported to override the encryption settings values in the CreateSession
+ // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption
+ // request headers must match the default encryption configuration of the directory
+ // bucket.
+ //
+ // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+ // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ ServerSideEncryption types.ServerSideEncryption
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high
+ // availability. Depending on performance needs, you can specify a different
+ // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide.
+ //
+ // - For directory buckets, only the S3 Express One Zone storage class is
+ // supported to store newly created objects.
+ //
+ // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ StorageClass types.StorageClass
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ //
+ // This functionality is not supported for directory buckets.
+ Tagging *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
+ WebsiteRedirectLocation *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type CreateMultipartUploadOutput struct {
+
+ // If the bucket has a lifecycle rule configured with an action to abort
+ // incomplete multipart uploads and the prefix in the lifecycle rule matches the
+ // object name in the request, the response includes this header. The header
+ // indicates when the initiated multipart upload becomes eligible for an abort
+ // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide.
+ //
+ // The response also includes the x-amz-abort-rule-id header that provides the ID
+ // of the lifecycle configuration rule that defines the abort action.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+ AbortDate *time.Time
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
+ AbortRuleId *string
+
+ // The name of the bucket to which the multipart upload was initiated. Does not
+ // return the access point ARN or access point alias if used.
+ //
+ // Access points are not supported by directory buckets.
+ Bucket *string
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // Indicates the checksum type that you want Amazon S3 to use to calculate the
+ // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Object key for which the multipart upload was initiated.
+ Key *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use for
+ // object encryption. The value of this header is a Base64 encoded string of a
+ // UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ SSEKMSEncryptionContext *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256 , aws:kms ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // ID for the initiated multipart upload.
+ UploadId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateMultipartUpload{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateMultipartUpload"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateMultipartUpload(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *CreateMultipartUploadInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateMultipartUpload",
+ }
+}
+
+// getCreateMultipartUploadBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getCreateMultipartUploadBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateMultipartUploadInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateMultipartUploadUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateMultipartUploadBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
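+
+// A minimal sketch of starting a multipart upload with this operation; the
+// bucket and key names are placeholders, and default credentials are assumed
+// to be configured.
+//
+//	import (
+//		"context"
+//		"log"
+//
+//		"github.com/aws/aws-sdk-go-v2/aws"
+//		"github.com/aws/aws-sdk-go-v2/config"
+//		"github.com/aws/aws-sdk-go-v2/service/s3"
+//	)
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := s3.NewFromConfig(cfg)
+//	out, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("large-object"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// Upload parts with UploadPart, then finish with CompleteMultipartUpload
+//	// or clean up with AbortMultipartUpload; all three calls take out.UploadId.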
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go
new file mode 100644
index 000000000..21db13194
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go
@@ -0,0 +1,435 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a session that establishes temporary security credentials to support
+// fast authentication and authorization for the Zonal endpoint API operations on
+// directory buckets. For more information about Zonal endpoint API operations that
+// include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs] in the Amazon S3
+// User Guide.
+//
+// To make Zonal endpoint API requests on a directory bucket, use the CreateSession
+// API operation. Specifically, you grant s3express:CreateSession permission to a
+// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM
+// credentials to make the CreateSession API request on the bucket, which returns
+// temporary security credentials that include the access key ID, secret access
+// key, session token, and expiration. These credentials have associated
+// permissions to access the Zonal endpoint API operations. After the session is
+// created, you don’t need to use other policies to grant permissions to each Zonal
+// endpoint API individually. Instead, in your Zonal endpoint API requests, you
+// sign your requests by applying the temporary security credentials of the session
+// to the request headers and following the SigV4 protocol for authentication. You
+// also apply the session token to the x-amz-s3session-token request header for
+// authorization. Temporary security credentials are scoped to the bucket and
+// expire after 5 minutes. After the expiration time, any calls that you make with
+// those credentials will fail. You must use IAM credentials again to make a
+// CreateSession API request that generates a new set of temporary credentials for
+// use. Temporary credentials cannot be extended or refreshed beyond the original
+// specified interval.
+//
+// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes
+// automatically to avoid service interruptions when a session expires. We
+// recommend that you use the Amazon Web Services SDKs to initiate and manage
+// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns] in the Amazon S3
+// User Guide.
+//
+// - You must make requests for this API operation to the Zonal endpoint. These
+// endpoints support virtual-hosted-style requests in the format
+// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style
+// requests are not supported. For more information about endpoints in Availability
+// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information about endpoints
+// in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// - CopyObject API operation - Unlike other Zonal endpoint API operations, the
+// CopyObject API operation doesn't use the temporary security credentials
+// returned from the CreateSession API operation for authentication and
+// authorization. For information about authentication and authorization of the
+// CopyObject API operation on directory buckets, see [CopyObject].
+//
+// - HeadBucket API operation - Unlike other Zonal endpoint API operations, the
+// HeadBucket API operation doesn't use the temporary security credentials
+// returned from the CreateSession API operation for authentication and
+// authorization. For information about authentication and authorization of the
+// HeadBucket API operation on directory buckets, see [HeadBucket].
+//
+// Permissions To obtain temporary security credentials, you must create a bucket
+// policy or an IAM identity-based policy that grants s3express:CreateSession
+// permission to the bucket. In a policy, you can have the s3express:SessionMode
+// condition key to control who can create a ReadWrite or ReadOnly session. For
+// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode]
+// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3
+// User Guide.
+//
+// To grant cross-account access to Zonal endpoint API operations, the bucket
+// policy should also grant both accounts the s3express:CreateSession permission.
+//
+// If you want to encrypt objects with SSE-KMS, you must also have the
+// kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based
+// policies and KMS key policies for the target KMS key.
+//
+// Encryption For directory buckets, there are only two supported options for
+// server-side encryption: server-side encryption with Amazon S3 managed keys
+// (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms
+// ). We recommend that the bucket's default encryption uses the desired encryption
+// configuration and you don't override the bucket default encryption in your
+// CreateSession requests or PUT object requests. Then, new objects are
+// automatically encrypted with the desired encryption settings. For more
+// information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide. For more information about the
+// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+//
+// For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low
+// latency. To encrypt new objects in a directory bucket with SSE-KMS, you must
+// specify SSE-KMS as the directory bucket's default encryption configuration with
+// a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal
+// endpoint API operations, new objects are automatically encrypted and decrypted
+// with SSE-KMS and S3 Bucket Keys during the session.
+//
+// Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key] (
+// aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default
+// encryption configuration with a customer managed key, you can't change the
+// customer managed key for the bucket's SSE-KMS configuration.
+//
+// In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't
+// override the values of the encryption settings ( x-amz-server-side-encryption ,
+// x-amz-server-side-encryption-aws-kms-key-id ,
+// x-amz-server-side-encryption-context , and
+// x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession
+// request. You don't need to explicitly specify these encryption settings values
+// in Zonal endpoint API calls, and Amazon S3 will use the encryption settings
+// values from the CreateSession request to protect new objects in the directory
+// bucket.
+//
+// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the
+// session token refreshes automatically to avoid service interruptions when a
+// session expires. The CLI or the Amazon Web Services SDKs use the bucket's
+// default encryption configuration for the CreateSession request. It's not
+// supported to override the encryption settings values in the CreateSession
+// request. Also, in the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]), it's not
+// supported to override the values of the encryption settings from the
+// CreateSession request.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html
+// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters
+// [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) {
+ if params == nil {
+ params = &CreateSessionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateSessionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateSessionInput struct {
+
+ // The name of the bucket that you create a session for.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using KMS keys (SSE-KMS).
+ //
+ // S3 Bucket Keys are always enabled for GET and PUT operations in a directory
+	// bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy
+ // SSE-KMS encrypted objects from general purpose buckets to directory buckets,
+ // from directory buckets to general purpose buckets, or between directory buckets,
+ // through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a
+ // copy request is made for a KMS-encrypted object.
+ //
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
+ BucketKeyEnabled *bool
+
+ // Specifies the Amazon Web Services KMS Encryption Context as an additional
+ // encryption context to use for object encryption. The value of this header is a
+ // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption
+ // context as key-value pairs. This value is stored as object metadata and
+ // automatically gets passed on to Amazon Web Services KMS for future GetObject
+ // operations on this object.
+ //
+ // General purpose buckets - This value must be explicitly added during CopyObject
+ // operations if you want an additional encryption context for your object. For
+	// more information, see [Encryption context] in the Amazon S3 User Guide.
+ //
+ // Directory buckets - You can optionally provide an explicit encryption context
+ // value. The value must match the default encryption context - the bucket Amazon
+ // Resource Name (ARN). An additional encryption context value is not supported.
+ //
+ // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context
+ SSEKMSEncryptionContext *string
+
+ // If you specify x-amz-server-side-encryption with aws:kms , you must specify the
+ // x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key
+ // ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you
+ // get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key
+ // alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist
+	// in the same account that's issuing the command, you must use the full Key
+	// ARN, not the Key ID.
+ //
+ // Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket's lifetime.
+	// The [Amazon Web Services managed key] ( aws/s3 ) isn't supported.
+ //
+ // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm to use when you store objects in the
+ // directory bucket.
+ //
+ // For directory buckets, there are only two supported options for server-side
+ // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256
+ // ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default,
+	// Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption] in the Amazon S3
+ // User Guide.
+ //
+ // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Specifies the mode of the session that will be created, either ReadWrite or
+ // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is
+ // capable of executing all the Zonal endpoint API operations on a directory
+ // bucket. A ReadOnly session is constrained to execute the following Zonal
+ // endpoint API operations: GetObject , HeadObject , ListObjectsV2 ,
+ // GetObjectAttributes , ListParts , and ListMultipartUploads .
+ SessionMode types.SessionMode
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.DisableS3ExpressSessionAuth = ptr.Bool(true)
+}
+
+type CreateSessionOutput struct {
+
+ // The established temporary security credentials for the created session.
+ //
+ // This member is required.
+ Credentials *types.SessionCredentials
+
+ // Indicates whether to use an S3 Bucket Key for server-side encryption with KMS
+ // keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use for
+ // object encryption. The value of this header is a Base64 encoded string of a
+ // UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ // This value is stored as object metadata and automatically gets passed on to
+ // Amazon Web Services KMS for future GetObject operations on this object.
+ SSEKMSEncryptionContext *string
+
+ // If you specify x-amz-server-side-encryption with aws:kms , this header indicates
+ // the ID of the KMS symmetric encryption customer managed key that was used for
+ // object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store objects in the
+ // directory bucket.
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateSessionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addCreateSessionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *CreateSessionInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateSession",
+ }
+}
+
+// getCreateSessionBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getCreateSessionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*CreateSessionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getCreateSessionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
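+
+// Directory-bucket clients normally obtain and refresh these session
+// credentials automatically, so a direct call is rarely needed. A minimal
+// sketch of calling the operation anyway, assuming an initialized *s3.Client
+// named client; the directory-bucket name is a placeholder.
+//
+//	out, err := client.CreateSession(context.TODO(), &s3.CreateSessionInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// out.Credentials carries the access key ID, secret access key, session
+//	// token, and expiration for the five-minute session.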
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
new file mode 100644
index 000000000..9e1edf91a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
@@ -0,0 +1,306 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the S3 bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// - Directory buckets - If multipart uploads in a directory bucket are in
+// progress, you can't delete the bucket until all the in-progress multipart
+// uploads are aborted or completed.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format
+// https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - You must have the s3:DeleteBucket
+// permission on the specified bucket in a policy.
+//
+// - Directory bucket permissions - You must have the s3express:DeleteBucket
+// permission in an IAM identity-based policy instead of a bucket policy.
+// Cross-account access to this API operation isn't supported. This operation can
+// only be performed by the Amazon Web Services account that owns the resource. For
+// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the
+// Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following operations are related to DeleteBucket :
+//
+// [CreateBucket]
+//
+// [DeleteObject]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) {
+ if params == nil {
+ params = &DeleteBucketInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucket", params, optFns, c.addOperationDeleteBucketMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketInput struct {
+
+ // Specifies the bucket being deleted.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+	// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucket"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucket(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucket",
+ }
+}
+
+// getDeleteBucketBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getDeleteBucketBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignDeleteBucket is used to generate a presigned HTTP request which
+// contains the presigned URL, signed headers, and the HTTP method used.
+func (c *PresignClient) PresignDeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &DeleteBucketInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "DeleteBucket", params, clientOptFns,
+ c.client.addOperationDeleteBucketMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ addDeleteBucketPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addDeleteBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
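+
+// A minimal sketch of generating a presigned DeleteBucket request, assuming an
+// initialized *s3.Client named client and imports for context, fmt, and time;
+// the bucket name and expiry are placeholders.
+//
+//	presigner := s3.NewPresignClient(client, func(o *s3.PresignOptions) {
+//		o.Expires = 15 * time.Minute
+//	})
+//	req, err := presigner.PresignDeleteBucket(context.TODO(), &s3.DeleteBucketInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// req.URL can be used with req.Method (DELETE) until the expiry elapses.
+//	fmt.Println(req.Method, req.URL)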
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..a3a1873fa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go
@@ -0,0 +1,250 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis].
+//
+// The following operations are related to DeleteBucketAnalyticsConfiguration :
+//
+// [GetBucketAnalyticsConfiguration]
+//
+// [ListBucketAnalyticsConfigurations]
+//
+// [PutBucketAnalyticsConfiguration]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketAnalyticsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketAnalyticsConfiguration", params, optFns, c.addOperationDeleteBucketAnalyticsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketAnalyticsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketAnalyticsConfigurationInput struct {
+
+ // The name of the bucket from which an analytics configuration is deleted.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketAnalyticsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketAnalyticsConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketAnalyticsConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketAnalyticsConfiguration",
+ }
+}
+
+// getDeleteBucketAnalyticsConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if
+// the input has a modeled bucket name.
+func getDeleteBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
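+
+// A minimal sketch of deleting one analytics configuration, assuming an
+// initialized *s3.Client named client; the bucket name, configuration ID, and
+// expected owner account ID are placeholders.
+//
+//	_, err := client.DeleteBucketAnalyticsConfiguration(context.TODO(),
+//		&s3.DeleteBucketAnalyticsConfigurationInput{
+//			Bucket:              aws.String("amzn-s3-demo-bucket"),
+//			Id:                  aws.String("storage-class-analysis-1"),
+//			ExpectedBucketOwner: aws.String("111122223333"),
+//		})
+//	if err != nil {
+//		log.Fatal(err)
+//	}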
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
new file mode 100644
index 000000000..ebae81c34
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go
@@ -0,0 +1,238 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes the cors configuration information set for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:PutBucketCORS
+// action. The bucket owner has this permission by default and can grant this
+// permission to others.
+//
+// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide.
+//
+// # Related Resources
+//
+// [PutBucketCors]
+//
+// [RESTOPTIONSobject]
+//
+// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
+func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) {
+ if params == nil {
+ params = &DeleteBucketCorsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketCors", params, optFns, c.addOperationDeleteBucketCorsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketCorsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketCorsInput struct {
+
+ // Specifies the bucket whose cors configuration is being deleted.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketCorsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketCors"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketCorsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketCors",
+ }
+}
+
+// getDeleteBucketCorsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
new file mode 100644
index 000000000..bf654025b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go
@@ -0,0 +1,275 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This implementation of the DELETE action resets the default encryption for the
+// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3).
+//
+// - General purpose buckets - For information about the bucket default
+// encryption feature, see [Amazon S3 Bucket Default Encryption] in the Amazon S3 User Guide.
+//
+// - Directory buckets - For directory buckets, there are only two supported
+// options for server-side encryption: SSE-S3 and SSE-KMS. For information about
+// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets].
+//
+// Permissions
+//
+// - General purpose bucket permissions - The s3:PutEncryptionConfiguration
+// permission is required in a policy. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// - Directory bucket permissions - To grant access to this API operation, you
+// must have the s3express:PutEncryptionConfiguration permission in an IAM
+// identity-based policy instead of a bucket policy. Cross-account access to this
+// API operation isn't supported. This operation can only be performed by the
+// Amazon Web Services account that owns the resource. For more information about
+// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following operations are related to DeleteBucketEncryption :
+//
+// [PutBucketEncryption]
+//
+// [GetBucketEncryption]
+//
+// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
+// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
+// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html
+// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) {
+ if params == nil {
+ params = &DeleteBucketEncryptionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketEncryption", params, optFns, c.addOperationDeleteBucketEncryptionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketEncryptionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketEncryptionInput struct {
+
+ // The name of the bucket containing the server-side encryption configuration to
+ // delete.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketEncryptionOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketEncryption"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketEncryptionInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketEncryption",
+ }
+}
+
+// getDeleteBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
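
For context, a minimal sketch of how the newly vendored DeleteBucketEncryption operation might be called from application code. The bucket name is hypothetical; the sketch assumes only the standard aws-sdk-go-v2 config loader and nothing from this repository:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()

        // Load region and credentials from the environment and shared config files.
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // Reset the bucket's default encryption back to SSE-S3.
        _, err = client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
            Bucket: aws.String("example-bucket"), // hypothetical bucket name
        })
        if err != nil {
            log.Fatal(err)
        }
    }
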
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..a62386c3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,252 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
+// without performance impact or operational overhead. S3 Intelligent-Tiering
+// delivers automatic cost savings in three low latency and high throughput access
+// tiers. To get the lowest storage cost on data that can be accessed in minutes to
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to DeleteBucketIntelligentTieringConfiguration include:
+//
+// [GetBucketIntelligentTieringConfiguration]
+//
+// [PutBucketIntelligentTieringConfiguration]
+//
+// [ListBucketIntelligentTieringConfigurations]
+//
+// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html
+// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketIntelligentTieringConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketIntelligentTieringConfiguration", params, optFns, c.addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketIntelligentTieringConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketIntelligentTieringConfigurationInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketIntelligentTieringConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketIntelligentTieringConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketIntelligentTieringConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getDeleteBucketIntelligentTieringConfigurationBucketMember returns a pointer
+// to string denoting a provided bucket member value and a boolean indicating if
+// the input has a modeled bucket name.
+func getDeleteBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketIntelligentTieringConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketIntelligentTieringConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
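
Both Bucket and Id are required members here, and the validation middleware registered above rejects requests that omit either. A brief usage sketch with hypothetical identifiers, reusing the client and ctx from the sketch after DeleteBucketEncryption:

    // Remove a single S3 Intelligent-Tiering configuration, selected by its ID.
    _, err = client.DeleteBucketIntelligentTieringConfiguration(ctx,
        &s3.DeleteBucketIntelligentTieringConfigurationInput{
            Bucket: aws.String("example-bucket"),     // hypothetical
            Id:     aws.String("example-tiering-id"), // hypothetical configuration ID
        })
    if err != nil {
        log.Fatal(err)
    }
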
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
new file mode 100644
index 000000000..d24e3f754
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go
@@ -0,0 +1,250 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes an inventory configuration (identified by the inventory ID) from the
+// bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutInventoryConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory].
+//
+// Operations related to DeleteBucketInventoryConfiguration include:
+//
+// [GetBucketInventoryConfiguration]
+//
+// [PutBucketInventoryConfiguration]
+//
+// [ListBucketInventoryConfigurations]
+//
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
+func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketInventoryConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketInventoryConfiguration", params, optFns, c.addOperationDeleteBucketInventoryConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketInventoryConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketInventoryConfigurationInput struct {
+
+ // The name of the bucket containing the inventory configuration to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketInventoryConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketInventoryConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketInventoryConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketInventoryConfiguration",
+ }
+}
+
+// getDeleteBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getDeleteBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
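
Each of these files wires an UpdateEndpoint customization that reads options.UsePathStyle, so a client-level flag changes how the bucket is folded into the endpoint. A sketch of a path-style client, e.g. against an S3-compatible service; the endpoint URL is hypothetical and the sketch assumes a recent SDK that exposes Options.BaseEndpoint:

    // Path-style addressing: the bucket stays in the URL path instead of the host.
    client := s3.NewFromConfig(cfg, func(o *s3.Options) {
        o.UsePathStyle = true
        o.BaseEndpoint = aws.String("https://s3.example.internal") // hypothetical endpoint
    })

    _, err = client.DeleteBucketInventoryConfiguration(ctx,
        &s3.DeleteBucketInventoryConfigurationInput{
            Bucket: aws.String("example-bucket"),       // hypothetical
            Id:     aws.String("example-inventory-id"), // hypothetical inventory ID
        })
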
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
new file mode 100644
index 000000000..489be24c4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go
@@ -0,0 +1,277 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the lifecycle configuration from the specified bucket. Amazon S3
+// removes all the lifecycle configuration rules in the lifecycle subresource
+// associated with the bucket. Your objects never expire, and Amazon S3 no longer
+// automatically deletes any objects on the basis of rules contained in the deleted
+// lifecycle configuration.
+//
+// Permissions
+// - General purpose bucket permissions - By default, all Amazon S3 resources
+// are private, including buckets, objects, and related subresources (for example,
+// lifecycle configuration and website configuration). Only the resource owner
+// (that is, the Amazon Web Services account that created it) can access the
+// resource. The resource owner can optionally grant access permissions to others
+// by writing an access policy. For this operation, a user must have the
+// s3:PutLifecycleConfiguration permission.
+//
+// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// - Directory bucket permissions - You must have the
+// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy
+// to use this operation. Cross-account access to this API operation isn't
+// supported. The resource owner can optionally grant access permissions to others
+// by creating a role or user for them as long as they are within the same account
+// as the owner and resource.
+//
+// For more information about directory bucket policies and permissions, see
+// [Authorizing Regional endpoint APIs with IAM] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format
+// https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in
+// the Amazon S3 User Guide. For more information about endpoints in Local
+// Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// For more information about the object expiration, see [Elements to Describe Lifecycle Actions].
+//
+// Related actions include:
+//
+// [PutBucketLifecycleConfiguration]
+//
+// [GetBucketLifecycleConfiguration]
+//
+// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions
+// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) {
+ if params == nil {
+ params = &DeleteBucketLifecycleInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketLifecycle", params, optFns, c.addOperationDeleteBucketLifecycleMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketLifecycleOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketLifecycleInput struct {
+
+ // The bucket name of the lifecycle to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketLifecycleOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketLifecycle{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketLifecycle"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketLifecycle(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketLifecycleInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketLifecycle",
+ }
+}
+
+// getDeleteBucketLifecycleBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketLifecycleBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketLifecycleInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketLifecycleUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketLifecycleBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
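
Every operation here also accepts trailing optFns ...func(*Options), so a single call can override client options without rebuilding the client. A sketch with a hypothetical account ID and a per-call region override:

    _, err = client.DeleteBucketLifecycle(ctx, &s3.DeleteBucketLifecycleInput{
        Bucket:              aws.String("example-bucket"), // hypothetical
        ExpectedBucketOwner: aws.String("111122223333"),   // hypothetical; a mismatch fails with 403
    }, func(o *s3.Options) {
        o.Region = "us-west-2" // per-call override applied only to this request
    })
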
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go
new file mode 100644
index 000000000..880d74d94
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go
@@ -0,0 +1,237 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes a metadata table configuration from a general purpose bucket. For
+// more information, see [Accelerating data discovery with S3 Metadata] in the
+// Amazon S3 User Guide.
+//
+// Permissions To use this operation, you must have the
+// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables]
+// in the Amazon S3 User Guide.
+//
+// The following operations are related to DeleteBucketMetadataTableConfiguration :
+//
+// [CreateBucketMetadataTableConfiguration]
+//
+// [GetBucketMetadataTableConfiguration]
+//
+// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html
+// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html
+// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html
+// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html
+func (c *Client) DeleteBucketMetadataTableConfiguration(ctx context.Context, params *DeleteBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataTableConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketMetadataTableConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataTableConfiguration", params, optFns, c.addOperationDeleteBucketMetadataTableConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketMetadataTableConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketMetadataTableConfigurationInput struct {
+
+ // The general purpose bucket that you want to remove the metadata table
+ // configuration from.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The expected bucket owner of the general purpose bucket that you want to
+ // remove the metadata table configuration from.
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketMetadataTableConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataTableConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketMetadataTableConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketMetadataTableConfiguration",
+ }
+}
+
+// getDeleteBucketMetadataTableConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if
+// the input has a modeled bucket name.
+func getDeleteBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketMetadataTableConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketMetadataTableConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
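
The response-error middleware added above wraps service failures in smithy-go error types, so callers can branch on the service error code. A sketch of that pattern, reusing client and ctx from the earlier sketches (it additionally needs the errors and github.com/aws/smithy-go imports):

    _, err = client.DeleteBucketMetadataTableConfiguration(ctx,
        &s3.DeleteBucketMetadataTableConfigurationInput{
            Bucket: aws.String("example-bucket"), // hypothetical
        })
    if err != nil {
        var ae smithy.APIError
        if errors.As(err, &ae) {
            // e.g. "AccessDenied" when s3:DeleteBucketMetadataTableConfiguration is missing
            log.Printf("S3 error %s: %s", ae.ErrorCode(), ae.ErrorMessage())
        } else {
            log.Fatal(err)
        }
    }
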
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
new file mode 100644
index 000000000..ed96b0253
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go
@@ -0,0 +1,254 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes a metrics configuration for the Amazon CloudWatch request metrics
+// (specified by the metrics configuration ID) from the bucket. Note that this
+// doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to DeleteBucketMetricsConfiguration :
+//
+// [GetBucketMetricsConfiguration]
+//
+// [PutBucketMetricsConfiguration]
+//
+// [ListBucketMetricsConfigurations]
+//
+// [Monitoring Metrics with Amazon CloudWatch]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketMetricsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetricsConfiguration", params, optFns, c.addOperationDeleteBucketMetricsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketMetricsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketMetricsConfigurationInput struct {
+
+ // The name of the bucket containing the metrics configuration to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the metrics configuration. The ID has a 64 character
+ // limit and can only contain letters, numbers, periods, dashes, and underscores.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketMetricsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetricsConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketMetricsConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketMetricsConfiguration",
+ }
+}
+
+// getDeleteBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getDeleteBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketMetricsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
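
Since every operation takes a context.Context as its first argument, deadlines and cancellation flow through the entire middleware stack, including the retry loop. A brief sketch with a hypothetical timeout (needs the time import):

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err = client.DeleteBucketMetricsConfiguration(ctx,
        &s3.DeleteBucketMetricsConfigurationInput{
            Bucket: aws.String("example-bucket"),     // hypothetical
            Id:     aws.String("example-metrics-id"), // hypothetical metrics configuration ID
        })
    if err != nil {
        log.Fatal(err)
    }
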
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
new file mode 100644
index 000000000..4775a2516
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go
@@ -0,0 +1,235 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:PutBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+//
+// For information about Amazon S3 Object Ownership, see [Using Object Ownership].
+//
+// The following operations are related to DeleteBucketOwnershipControls :
+//
+// # GetBucketOwnershipControls
+//
+// # PutBucketOwnershipControls
+//
+// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &DeleteBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketOwnershipControls", params, optFns, c.addOperationDeleteBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketOwnershipControlsInput struct {
+
+ // The Amazon S3 bucket whose OwnershipControls you want to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketOwnershipControlsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketOwnershipControls"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketOwnershipControlsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketOwnershipControls",
+ }
+}
+
+// getDeleteBucketOwnershipControlsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getDeleteBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketOwnershipControlsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
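
A hedged usage sketch for the operation above, written as a helper so it composes with an existing *s3.Client; the `s3ops` package name, bucket, and account ID are hypothetical:

```go
package s3ops // hypothetical helper package

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteOwnershipControls removes the bucket's Object Ownership settings.
// ExpectedBucketOwner makes the request fail with 403 Forbidden when the
// bucket is owned by an account other than accountID.
func deleteOwnershipControls(ctx context.Context, client *s3.Client, bucket, accountID string) error {
	_, err := client.DeleteBucketOwnershipControls(ctx, &s3.DeleteBucketOwnershipControlsInput{
		Bucket:              aws.String(bucket),
		ExpectedBucketOwner: aws.String(accountID),
	})
	return err
}
```
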
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
new file mode 100644
index 000000000..048abe79e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go
@@ -0,0 +1,287 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions If you are using an identity other than the root user of the Amazon
+// Web Services account that owns the bucket, the calling identity must both have
+// the DeleteBucketPolicy permissions on the specified bucket and belong to the
+// bucket owner's account in order to use this operation.
+//
+// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
+// Access Denied error. If you have the correct permissions, but you're not using
+// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error.
+//
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+// actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing these
+// API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
+//
+// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is
+// required in a policy. For more information about bucket policies for general
+// purpose buckets, see [Using Bucket Policies and User Policies] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation, you
+// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web Services
+// account that owns the resource. For more information about directory bucket
+// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// # The following operations are related to DeleteBucketPolicy
+//
+// [CreateBucket]
+//
+// [DeleteObject]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) {
+ if params == nil {
+ params = &DeleteBucketPolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketPolicy", params, optFns, c.addOperationDeleteBucketPolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketPolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketPolicyInput struct {
+
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+	// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketPolicyOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketPolicy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketPolicyInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketPolicy",
+ }
+}
+
+// getDeleteBucketPolicyBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
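
The doc comment above distinguishes a 403 (missing s3:DeleteBucketPolicy) from a 405 (identity outside the bucket owner's account). A caller can tell those apart through smithy's generic APIError; a sketch, with the `s3ops` package name assumed:

```go
package s3ops // hypothetical helper package

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// deleteBucketPolicy removes the bucket policy and logs which of the two
// documented failure modes occurred, if any.
func deleteBucketPolicy(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		// Typically "AccessDenied" for the 403 case and "MethodNotAllowed"
		// for the 405 case described above.
		log.Printf("DeleteBucketPolicy: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
	return err
}
```
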
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
new file mode 100644
index 000000000..f5c434b8d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go
@@ -0,0 +1,245 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes the replication configuration from the bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutReplicationConfiguration action. The bucket owner has these permissions by
+// default and can grant them to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+// and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// It can take a while for the deletion of a replication configuration to fully
+// propagate.
+//
+// For information about replication configuration, see [Replication] in the Amazon S3 User
+// Guide.
+//
+// The following operations are related to DeleteBucketReplication :
+//
+// [PutBucketReplication]
+//
+// [GetBucketReplication]
+//
+// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
+// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) {
+ if params == nil {
+ params = &DeleteBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketReplication", params, optFns, c.addOperationDeleteBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketReplicationInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketReplicationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketReplication"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketReplicationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketReplication",
+ }
+}
+
+// getDeleteBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
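
Usage follows the same shape as the other bucket-subresource deletes; one caveat worth encoding near the call site is the propagation delay the doc comment mentions. A sketch (the `s3ops` package is hypothetical):

```go
package s3ops // hypothetical helper package

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteReplication removes the bucket's replication configuration. Deletion
// can take a while to propagate, so an immediate GetBucketReplication may
// still briefly return the old configuration.
func deleteReplication(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketReplication(ctx, &s3.DeleteBucketReplicationInput{
		Bucket: aws.String(bucket),
	})
	return err
}
```
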
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
new file mode 100644
index 000000000..ab73775a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go
@@ -0,0 +1,235 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Deletes the tags from the bucket.
+//
+// To use this operation, you must have permission to perform the
+// s3:PutBucketTagging action. By default, the bucket owner has this permission and
+// can grant this permission to others.
+//
+// The following operations are related to DeleteBucketTagging :
+//
+// [GetBucketTagging]
+//
+// [PutBucketTagging]
+//
+// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
+// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
+func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) {
+ if params == nil {
+ params = &DeleteBucketTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketTagging", params, optFns, c.addOperationDeleteBucketTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketTaggingInput struct {
+
+ // The bucket that has the tag set to be removed.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketTaggingOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketTagging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketTaggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketTagging",
+ }
+}
+
+// getDeleteBucketTaggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
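
The addDeleteBucketTaggingUpdateEndpoint wiring above honors per-client options such as UsePathStyle. A sketch of how a caller toggles that option, which is commonly needed for S3-compatible stores; the bucket name is a placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// UsePathStyle feeds the UpdateEndpoint customization registered above,
	// forcing https://host/bucket URLs instead of virtual-hosted addressing.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true
	})

	_, err = client.DeleteBucketTagging(context.TODO(), &s3.DeleteBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
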
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
new file mode 100644
index 000000000..6ea3b745d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go
@@ -0,0 +1,244 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// This action removes the website configuration for a bucket. Amazon S3 returns a
+// 200 OK response upon successfully deleting a website configuration on the
+// specified bucket. You will get a 200 OK response if the website configuration
+// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
+// response if the bucket specified in the request does not exist.
+//
+// This DELETE action requires the S3:DeleteBucketWebsite permission. By default,
+// only the bucket owner can delete the website configuration attached to a bucket.
+// However, bucket owners can grant other users permission to delete the website
+// configuration by writing a bucket policy granting them the
+// S3:DeleteBucketWebsite permission.
+//
+// For more information about hosting websites, see [Hosting Websites on Amazon S3].
+//
+// The following operations are related to DeleteBucketWebsite :
+//
+// [GetBucketWebsite]
+//
+// [PutBucketWebsite]
+//
+// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html
+// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html
+// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &DeleteBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBucketWebsite", params, optFns, c.addOperationDeleteBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBucketWebsiteInput struct {
+
+ // The bucket name for which you want to remove the website configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeleteBucketWebsiteOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketWebsite"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteBucketWebsiteInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBucketWebsite",
+ }
+}
+
+// getDeleteBucketWebsiteBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
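
The bulk of each generated file is the addOperation*Middlewares function, which populates a smithy middleware.Stack. Callers can splice their own steps into that same stack through Options.APIOptions without touching vendored code; a sketch, where the "auditLog" middleware name is an illustrative assumption:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Each func in APIOptions runs against the per-operation stack built by
	// the generated addOperation*Middlewares functions above.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
				"auditLog",
				func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
				) (middleware.InitializeOutput, middleware.Metadata, error) {
					log.Println("S3 operation starting")
					return next.HandleInitialize(ctx, in)
				}), middleware.Before)
		})
	})

	_, err = client.DeleteBucketWebsite(context.TODO(), &s3.DeleteBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
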
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
new file mode 100644
index 000000000..f57c88ea5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go
@@ -0,0 +1,468 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Removes an object from a bucket. The behavior depends on the bucket's
+// versioning state:
+//
+// - If bucket versioning is not enabled, the operation permanently deletes the
+// object.
+//
+// - If bucket versioning is enabled, the operation inserts a delete marker,
+// which becomes the current version of the object. To permanently delete an object
+// in a versioned bucket, you must include the object’s versionId in the request.
+// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket].
+//
+// - If bucket versioning is suspended, the operation removes the object that
+// has a null versionId , if there is one, and inserts a delete marker that
+// becomes the current version of the object. If there isn't an object with a null
+// versionId , and all versions of the object have a versionId , Amazon S3 does
+// not remove the object and only inserts a delete marker. To permanently delete an
+// object that has a versionId , you must include the object’s versionId in the
+// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets].
+//
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets. For this API operation, only the null value of the version ID is
+// supported by directory buckets. You can only specify null to the versionId
+// query parameter in the request.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// To remove a specific version, you must use the versionId query parameter. Using
+// this query parameter permanently deletes the version. If the object deleted is a
+// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.
+//
+// If the object you want to delete is in a bucket where the bucket versioning
+// configuration is MFA Delete enabled, you must include the x-amz-mfa request
+// header in the DELETE versionId request. Requests that include x-amz-mfa must
+// use HTTPS. For more information about MFA Delete, see [Using MFA Delete] in the Amazon S3 User
+// Guide. To see sample requests that use versioning, see [Sample Request].
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// You can delete objects by explicitly calling DELETE Object or by configuring a bucket lifecycle ([PutBucketLifecycle]) to
+// enable Amazon S3 to remove them for you. If you want to block users or accounts
+// from removing or deleting objects from your bucket, you must deny them the
+// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration
+// actions.
+//
+// Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObject request includes specific headers.
+//
+// - s3:DeleteObject - To delete an object from a bucket, you must always have
+// the s3:DeleteObject permission.
+//
+// - s3:DeleteObjectVersion - To delete a specific version of an object from a
+// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create the session and
+// refresh the session token automatically to avoid service interruptions when
+// a session expires. For more information about authorization, see [CreateSession].
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following action is related to DeleteObject :
+//
+// [PutObject]
+//
+// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html
+func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) {
+ if params == nil {
+ params = &DeleteObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteObject", params, optFns, c.addOperationDeleteObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
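
Given the versioning semantics spelled out above (a simple DELETE only adds a delete marker on a versioned bucket, while a versionId-qualified DELETE is permanent), a caller usually wants both paths behind one helper. A sketch; the `s3ops` package name is hypothetical:

```go
package s3ops // hypothetical helper package

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteObject issues a simple DELETE when versionID is empty (adding a
// delete marker on versioned buckets) and a permanent, version-targeted
// DELETE otherwise.
func deleteObject(ctx context.Context, client *s3.Client, bucket, key, versionID string) error {
	in := &s3.DeleteObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	}
	if versionID != "" {
		in.VersionId = aws.String(versionID)
	}
	out, err := client.DeleteObject(ctx, in)
	if err != nil {
		return err
	}
	if out.DeleteMarker != nil && *out.DeleteMarker {
		log.Printf("delete marker involved, version %s", aws.ToString(out.VersionId))
	}
	return nil
}
```
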
+
+type DeleteObjectInput struct {
+
+ // The bucket name of the bucket containing the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+	// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+	// access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+	// S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key name of the object to delete.
+ //
+ // This member is required.
+ Key *string
+
+ // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to
+ // process this operation. To use this header, you must have the
+ // s3:BypassGovernanceRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
+ BypassGovernanceRetention *bool
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The If-Match header field makes the request method conditional on ETags. If the
+ // ETag value does not match, the operation returns a 412 Precondition Failed
+ // error. If the ETag matches or if the object doesn't exist, the operation will
+	// return a 204 Success (No Content) response.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // This functionality is only supported for directory buckets.
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+	// If present, the object is deleted only if its modification time matches the
+ // provided Timestamp . If the Timestamp values do not match, the operation
+ // returns a 412 Precondition Failed error. If the Timestamp matches or if the
+ // object doesn’t exist, the operation returns a 204 Success (No Content) response.
+ //
+ // This functionality is only supported for directory buckets.
+ IfMatchLastModifiedTime *time.Time
+
+ // If present, the object is deleted only if its size matches the provided size in
+ // bytes. If the Size value does not match, the operation returns a 412
+ // Precondition Failed error. If the Size matches or if the object doesn’t exist,
+ // the operation returns a 204 Success (No Content) response.
+ //
+ // This functionality is only supported for directory buckets.
+ //
+ // You can use the If-Match , x-amz-if-match-last-modified-time and
+	// x-amz-if-match-size conditional headers in conjunction with each other or
+ // individually.
+ IfMatchSize *int64
+
+ // The concatenation of the authentication device's serial number, a space, and
+ // the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
+ //
+ // This functionality is not supported for directory buckets.
+ MFA *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type DeleteObjectOutput struct {
+
+ // Indicates whether the specified object version that was permanently deleted was
+ // (true) or was not (false) a delete marker before deletion. In a simple DELETE,
+ // this header indicates whether (true) or not (false) the current version of the
+ // object is a delete marker. To learn more about delete markers, see [Working with delete markers].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html
+ DeleteMarker *bool
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObject"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteObject",
+ }
+}
+
+// getDeleteObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getDeleteObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignDeleteObject is used to generate a presigned HTTP Request that
+// contains the presigned URL, the signed headers, and the HTTP method used.
+func (c *PresignClient) PresignDeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &DeleteObjectInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "DeleteObject", params, clientOptFns,
+ c.client.addOperationDeleteObjectMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ addDeleteObjectPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addDeleteObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
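+
+// presignDeleteObjectUsageExample is an illustrative sketch, not part of the
+// generated SDK surface: it shows how PresignDeleteObject above might be used
+// to mint a presigned URL that a caller can later use to issue the DELETE
+// directly over HTTP. The helper name is invented for this example.
+func presignDeleteObjectUsageExample(ctx context.Context, presigner *PresignClient, bucket, key string) (string, error) {
+ req, err := presigner.PresignDeleteObject(ctx, &DeleteObjectInput{
+ Bucket: &bucket,
+ Key: &key,
+ })
+ if err != nil {
+ return "", err
+ }
+ // req also carries req.Method and req.SignedHeader for callers that need them.
+ return req.URL, nil
+}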
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
new file mode 100644
index 000000000..2e69b4317
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go
@@ -0,0 +1,270 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see [Object Tagging].
+//
+// To use this operation, you must have permission to perform the
+// s3:DeleteObjectTagging action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging :
+//
+// [PutObjectTagging]
+//
+// [GetObjectTagging]
+//
+// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) {
+ if params == nil {
+ params = &DeleteObjectTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteObjectTagging", params, optFns, c.addOperationDeleteObjectTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteObjectTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteObjectTaggingInput struct {
+
+ // The bucket name containing the objects from which to remove the tags.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key that identifies the object in the bucket from which to remove all tags.
+ //
+ // This member is required.
+ Key *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The versionId of the object that the tag-set will be removed from.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type DeleteObjectTaggingOutput struct {
+
+ // The versionId of the object the tag-set was removed from.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjectTagging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjectTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteObjectTaggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteObjectTagging",
+ }
+}
+
+// getDeleteObjectTaggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeleteObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
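+
+// deleteObjectTaggingExample is an illustrative sketch, not part of the
+// generated SDK surface: it removes the tag set from one version of an object
+// and reports which version the tags were removed from. The helper name and
+// the printed message are invented for this example.
+func deleteObjectTaggingExample(ctx context.Context, client *Client, bucket, key, versionID string) error {
+ out, err := client.DeleteObjectTagging(ctx, &DeleteObjectTaggingInput{
+ Bucket: &bucket,
+ Key: &key,
+ VersionId: &versionID,
+ })
+ if err != nil {
+ return err
+ }
+ if out.VersionId != nil {
+ fmt.Printf("removed tags from version %s\n", *out.VersionId)
+ }
+ return nil
+}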
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
new file mode 100644
index 000000000..a73f2b6d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go
@@ -0,0 +1,470 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation enables you to delete multiple objects from a bucket using a
+// single HTTP request. If you know the object keys that you want to delete, then
+// this operation provides a suitable alternative to sending individual delete
+// requests, reducing per-request overhead.
+//
+// The request can contain a list of up to 1,000 keys that you want to delete. In
+// the XML, you provide the object key names, and optionally, version IDs if you
+// want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success or failure, in the response. If the object
+// specified in the request isn't found, Amazon S3 confirms the deletion by
+// returning the result as deleted.
+//
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// The operation supports two modes for the response: verbose and quiet. By
+// default, the operation uses verbose mode in which the response includes the
+// result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion in quiet mode, the operation does not return any
+// information about the delete in the response body.
+//
+// When performing this action on an MFA Delete enabled bucket that attempts to
+// delete any versioned objects, you must include an MFA token. If you do not
+// provide one, the entire request will fail, even if there are non-versioned
+// objects you are trying to delete. If you provide an invalid token, whether there
+// are versioned keys in the request or not, the entire Multi-Object Delete request
+// will fail. For information about MFA Delete, see [MFA Delete] in the Amazon S3 User Guide.
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObjects request includes specific headers.
+//
+// - s3:DeleteObject - To delete an object from a bucket, you must always specify
+// the s3:DeleteObject permission.
+//
+// - s3:DeleteObjectVersion - To delete a specific version of an object from a
+// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
+// permission.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create the session and
+// refresh the session token automatically to avoid service interruptions when a
+// session expires. For more information about authorization, see [CreateSession].
+//
+// Content-MD5 request header
+//
+// - General purpose bucket - The Content-MD5 request header is required for all
+// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
+// your request body has not been altered in transit.
+//
+// - Directory bucket - The Content-MD5 request header or an additional checksum
+// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
+// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all
+// Multi-Object Delete requests.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to DeleteObjects :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [ListParts]
+//
+// [AbortMultipartUpload]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) {
+ if params == nil {
+ params = &DeleteObjectsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteObjects", params, optFns, c.addOperationDeleteObjectsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteObjectsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteObjectsInput struct {
+
+ // The bucket name containing the objects to delete.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the request.
+ //
+ // This member is required.
+ Delete *types.Delete
+
+ // Specifies whether you want to delete this object even if it has a
+ // Governance-type Object Lock in place. To use this header, you must have the
+ // s3:BypassGovernanceRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
+ BypassGovernanceRetention *bool
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
+ // fails the request with the HTTP status code 400 Bad Request .
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the
+ // supported algorithm from the following list:
+ //
+ // - CRC32
+ //
+ // - CRC32C
+ //
+ // - CRC64NVME
+ //
+ // - SHA1
+ //
+ // - SHA256
+ //
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through
+ // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest
+ // error.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The concatenation of the authentication device's serial number, a space, and
+ // the value that is displayed on your authentication device. Required to
+ // permanently delete a versioned object if versioning is configured with MFA
+ // delete enabled.
+ //
+ // When performing the DeleteObjects operation on an MFA delete enabled bucket
+ // that attempts to delete the specified versioned objects, you must include an
+ // MFA token. If you don't provide an MFA token, the entire request will fail, even
+ // if there are non-versioned objects that you are trying to delete. If you provide
+ // an invalid token, whether there are versioned object keys in the request or not,
+ // the entire Multi-Object Delete request will fail. For information about MFA
+ // Delete, see [MFA Delete] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete
+ MFA *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type DeleteObjectsOutput struct {
+
+ // Container element for a successful delete. It identifies the object that was
+ // successfully deleted.
+ Deleted []types.DeletedObject
+
+ // Container for a failed delete action that describes the object that Amazon S3
+ // attempted to delete and the error it encountered.
+ Errors []types.Error
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjects"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteObjects(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeleteObjectsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteObjects",
+ }
+}
+
+// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*DeleteObjectsInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getDeleteObjectsRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getDeleteObjectsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getDeleteObjectsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeleteObjectsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeleteObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeleteObjectsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
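+
+// deleteObjectsQuietExample is an illustrative sketch, not part of the
+// generated SDK surface: it batch-deletes the given keys (the API accepts up
+// to 1,000 per request) using the quiet mode described above, so the response
+// lists only the keys that failed to delete. The helper name is invented for
+// this example.
+func deleteObjectsQuietExample(ctx context.Context, client *Client, bucket string, keys []string) error {
+ ids := make([]types.ObjectIdentifier, 0, len(keys))
+ for i := range keys {
+ ids = append(ids, types.ObjectIdentifier{Key: &keys[i]})
+ }
+ quiet := true
+ out, err := client.DeleteObjects(ctx, &DeleteObjectsInput{
+ Bucket: &bucket,
+ Delete: &types.Delete{Objects: ids, Quiet: &quiet},
+ })
+ if err != nil {
+ return err
+ }
+ // In quiet mode only failures come back; report each one.
+ for _, e := range out.Errors {
+ fmt.Printf("delete failed for %q: %s\n", *e.Key, *e.Message)
+ }
+ return nil
+}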
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
new file mode 100644
index 000000000..87e72df27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go
@@ -0,0 +1,241 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For
+// more information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// The following operations are related to DeletePublicAccessBlock :
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [GetPublicAccessBlock]
+//
+// [PutPublicAccessBlock]
+//
+// [GetBucketPolicyStatus]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
+func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) {
+ if params == nil {
+ params = &DeletePublicAccessBlockInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeletePublicAccessBlock", params, optFns, c.addOperationDeletePublicAccessBlockMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeletePublicAccessBlockOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeletePublicAccessBlockInput struct {
+
+ // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type DeletePublicAccessBlockOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeletePublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePublicAccessBlock"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeletePublicAccessBlock(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *DeletePublicAccessBlockInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeletePublicAccessBlock",
+ }
+}
+
+// getDeletePublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getDeletePublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*DeletePublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addDeletePublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getDeletePublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
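+
+// deletePublicAccessBlockExample is an illustrative sketch, not part of the
+// generated SDK surface: it removes the PublicAccessBlock configuration from
+// a bucket while pinning the request to the expected owner account, so a
+// mismatched account ID fails with 403 Forbidden. The helper name is invented
+// for this example.
+func deletePublicAccessBlockExample(ctx context.Context, client *Client, bucket, ownerAccountID string) error {
+ _, err := client.DeletePublicAccessBlock(ctx, &DeletePublicAccessBlockInput{
+ Bucket: &bucket,
+ ExpectedBucketOwner: &ownerAccountID,
+ })
+ return err
+}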
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
new file mode 100644
index 000000000..0ed2a8670
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go
@@ -0,0 +1,272 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// This implementation of the GET action uses the accelerate subresource to return
+// the Transfer Acceleration state of a bucket, which is either Enabled or
+// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that
+// enables you to perform faster data transfers to and from Amazon S3.
+//
+// To use this operation, you must have permission to perform the
+// s3:GetAccelerateConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// You set the Transfer Acceleration state of an existing bucket to Enabled or
+// Suspended by using the [PutBucketAccelerateConfiguration] operation.
+//
+// A GET accelerate request does not return a state value for a bucket that has no
+// transfer acceleration state. A bucket has no Transfer Acceleration state if a
+// state has never been set on the bucket.
+//
+// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User
+// Guide.
+//
+// The following operations are related to GetBucketAccelerateConfiguration :
+//
+// [PutBucketAccelerateConfiguration]
+//
+// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketAccelerateConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketAccelerateConfiguration", params, optFns, c.addOperationGetBucketAccelerateConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketAccelerateConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketAccelerateConfigurationInput struct {
+
+ // The name of the bucket for which the accelerate configuration is retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketAccelerateConfigurationOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // The accelerate configuration of the bucket.
+ Status types.BucketAccelerateStatus
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAccelerateConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketAccelerateConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketAccelerateConfiguration",
+ }
+}
+
+// getGetBucketAccelerateConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAccelerateConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAccelerateConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
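+
+// getAccelerateStatusExample is an illustrative sketch, not part of the
+// generated SDK surface: it reads the Transfer Acceleration state of a
+// bucket. As noted above, an empty Status means acceleration has never been
+// configured on the bucket. The helper name is invented for this example.
+func getAccelerateStatusExample(ctx context.Context, client *Client, bucket string) (types.BucketAccelerateStatus, error) {
+ out, err := client.GetBucketAccelerateConfiguration(ctx, &GetBucketAccelerateConfigurationInput{
+ Bucket: &bucket,
+ })
+ if err != nil {
+ return "", err
+ }
+ return out.Status, nil
+}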
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
new file mode 100644
index 000000000..fed95ac01
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go
@@ -0,0 +1,267 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// This implementation of the GET action uses the acl subresource to return the
+// access control list (ACL) of a bucket. To use GET to return the ACL of the
+// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission
+// is granted to the anonymous user, you can return the ACL of the bucket without
+// using an authorization header.
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// requests to read ACLs are still supported and return the
+// bucket-owner-full-control ACL with the owner being the account that created the
+// bucket. For more information, see [Controlling object ownership and disabling ACLs] in the Amazon S3 User Guide.
+//
+// The following operations are related to GetBucketAcl :
+//
+// [ListObjects]
+//
+// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) {
+ if params == nil {
+ params = &GetBucketAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketAcl", params, optFns, c.addOperationGetBucketAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketAclInput struct {
+
+ // Specifies the S3 bucket whose ACL is being requested.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketAclOutput struct {
+
+ // A list of grants.
+ Grants []types.Grant
+
+ // Container for the bucket owner's display name and ID.
+ Owner *types.Owner
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAcl"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketAclInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketAcl",
+ }
+}
+
+// getGetBucketAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetBucketAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
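
For readers skimming this vendored code, a minimal caller sketch for the operation above may help; it is an illustration only. The bucket name is a placeholder, and credentials are assumed to come from the default config chain:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the environment / shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "example-bucket" is a placeholder name.
	out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}

	if out.Owner != nil {
		fmt.Println("owner ID:", aws.ToString(out.Owner.ID))
	}
	for _, g := range out.Grants {
		// Each grant pairs a grantee with a permission such as FULL_CONTROL.
		if g.Grantee != nil {
			fmt.Printf("grantee type=%s permission=%s\n", g.Grantee.Type, g.Permission)
		}
	}
}
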
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..b0423c85e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go
@@ -0,0 +1,256 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// This implementation of the GET action returns an analytics configuration
+// (identified by the analytics configuration ID) from the bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User
+// Guide.
+//
+// The following operations are related to GetBucketAnalyticsConfiguration :
+//
+// [DeleteBucketAnalyticsConfiguration]
+//
+// [ListBucketAnalyticsConfigurations]
+//
+// [PutBucketAnalyticsConfiguration]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketAnalyticsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketAnalyticsConfiguration", params, optFns, c.addOperationGetBucketAnalyticsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketAnalyticsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketAnalyticsConfigurationInput struct {
+
+ // The name of the bucket from which an analytics configuration is retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketAnalyticsConfigurationOutput struct {
+
+ // The configuration and any analyses for the analytics filter.
+ AnalyticsConfiguration *types.AnalyticsConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAnalyticsConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketAnalyticsConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketAnalyticsConfiguration",
+ }
+}
+
+// getGetBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getGetBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
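
The calling pattern is identical, except the analytics getter also requires the configuration Id. A sketch with placeholder values, reusing a client built as in the previous example; the package and function names here are hypothetical:

package s3examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printAnalyticsConfig fetches one analytics configuration by its ID.
func printAnalyticsConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketAnalyticsConfiguration(ctx,
		&s3.GetBucketAnalyticsConfigurationInput{
			Bucket: aws.String(bucket), // both members are required
			Id:     aws.String(id),
		})
	if err != nil {
		return err
	}
	if ac := out.AnalyticsConfiguration; ac != nil {
		fmt.Println("analytics configuration:", aws.ToString(ac.Id))
	}
	return nil
}
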
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
new file mode 100644
index 000000000..ef6a970da
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go
@@ -0,0 +1,266 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the Cross-Origin Resource Sharing (CORS) configuration information set
+// for the bucket.
+//
+// To use this operation, you must have permission to perform the s3:GetBucketCORS
+// action. By default, the bucket owner has this permission and can grant it to
+// others.
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// For more information about CORS, see [Enabling Cross-Origin Resource Sharing].
+//
+// The following operations are related to GetBucketCors :
+//
+// [PutBucketCors]
+//
+// [DeleteBucketCors]
+//
+// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html
+func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) {
+ if params == nil {
+ params = &GetBucketCorsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketCors", params, optFns, c.addOperationGetBucketCorsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketCorsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketCorsInput struct {
+
+ // The bucket name for which to get the cors configuration.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketCorsOutput struct {
+
+ // A set of origins and methods (cross-origin access that you want to allow). You
+ // can add up to 100 rules to the configuration.
+ CORSRules []types.CORSRule
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketCors"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketCorsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketCors",
+ }
+}
+
+// getGetBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
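
A similar sketch for the CORS getter. Note that a bucket with no CORS configuration returns an error (NoSuchCORSConfiguration) rather than an empty rule list:

package s3examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printCORSRules lists the CORS rules set on a bucket. Names are hypothetical.
func printCORSRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.CORSRules {
		fmt.Printf("origins=%v methods=%v max-age=%ds\n",
			rule.AllowedOrigins, rule.AllowedMethods, aws.ToInt32(rule.MaxAgeSeconds))
	}
	return nil
}
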
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
new file mode 100644
index 000000000..82b7211a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go
@@ -0,0 +1,281 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the default encryption configuration for an Amazon S3 bucket. By
+// default, all buckets have a default encryption configuration that uses
+// server-side encryption with Amazon S3 managed keys (SSE-S3).
+//
+// - General purpose buckets - For information about the bucket default
+//   encryption feature, see [Amazon S3 Bucket Default Encryption] in the Amazon S3 User Guide.
+//
+// - Directory buckets - For directory buckets, there are only two supported
+// options for server-side encryption: SSE-S3 and SSE-KMS. For information about
+// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets].
+//
+// Permissions
+//
+// - General purpose bucket permissions - The s3:GetEncryptionConfiguration
+// permission is required in a policy. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+//   information about permissions, see [Permissions Related to Bucket Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// - Directory bucket permissions - To grant access to this API operation, you
+// must have the s3express:GetEncryptionConfiguration permission in an IAM
+// identity-based policy instead of a bucket policy. Cross-account access to this
+// API operation isn't supported. This operation can only be performed by the
+// Amazon Web Services account that owns the resource. For more information about
+//   directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following operations are related to GetBucketEncryption :
+//
+// [PutBucketEncryption]
+//
+// [DeleteBucketEncryption]
+//
+// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html
+// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
+// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html
+// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) {
+ if params == nil {
+ params = &GetBucketEncryptionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketEncryption", params, optFns, c.addOperationGetBucketEncryptionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketEncryptionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketEncryptionInput struct {
+
+ // The name of the bucket from which the server-side encryption configuration is
+ // retrieved.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketEncryptionOutput struct {
+
+ // Specifies the default server-side-encryption configuration.
+ ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketEncryption"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketEncryptionInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketEncryption",
+ }
+}
+
+// getGetBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
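
Per the doc comment above, every general purpose bucket carries a default encryption configuration, so this getter normally succeeds. A sketch along the same lines:

package s3examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printDefaultEncryption reports the bucket's default SSE rules.
func printDefaultEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.ServerSideEncryptionConfiguration == nil {
		return nil
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			// SSEAlgorithm is AES256 for SSE-S3 or aws:kms for SSE-KMS.
			fmt.Printf("algorithm=%s kms-key=%s\n",
				def.SSEAlgorithm, aws.ToString(def.KMSMasterKeyID))
		}
	}
	return nil
}
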
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..5a500f0a6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,257 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Gets the S3 Intelligent-Tiering configuration from the specified bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
+// without performance impact or operational overhead. S3 Intelligent-Tiering
+// delivers automatic cost savings in three low latency and high throughput access
+// tiers. To get the lowest storage cost on data that can be accessed in minutes to
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to GetBucketIntelligentTieringConfiguration include:
+//
+// [DeleteBucketIntelligentTieringConfiguration]
+//
+// [PutBucketIntelligentTieringConfiguration]
+//
+// [ListBucketIntelligentTieringConfigurations]
+//
+// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html
+// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
+func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketIntelligentTieringConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketIntelligentTieringConfiguration", params, optFns, c.addOperationGetBucketIntelligentTieringConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketIntelligentTieringConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketIntelligentTieringConfigurationInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketIntelligentTieringConfigurationOutput struct {
+
+ // Container for S3 Intelligent-Tiering configuration.
+ IntelligentTieringConfiguration *types.IntelligentTieringConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketIntelligentTieringConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketIntelligentTieringConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getGetBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if
+// the input has a modeled bucket name.
+func getGetBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketIntelligentTieringConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketIntelligentTieringConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
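
As with the analytics getter, both Bucket and Id are required here. A sketch printing the configured archive tierings:

package s3examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printTieringConfig fetches one Intelligent-Tiering configuration by ID.
func printTieringConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx,
		&s3.GetBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String(bucket),
			Id:     aws.String(id),
		})
	if err != nil {
		return err
	}
	if c := out.IntelligentTieringConfiguration; c != nil {
		fmt.Printf("id=%s status=%s\n", aws.ToString(c.Id), c.Status)
		for _, t := range c.Tierings {
			// AccessTier is ARCHIVE_ACCESS or DEEP_ARCHIVE_ACCESS.
			fmt.Printf("  %s after %d days\n", t.AccessTier, aws.ToInt32(t.Days))
		}
	}
	return nil
}
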
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
new file mode 100644
index 000000000..163c86ae7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go
@@ -0,0 +1,255 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns an inventory configuration (identified by the inventory configuration
+// ID) from the bucket.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetInventoryConfiguration action. The bucket owner has this permission by
+// default and can grant this permission to others. For more information about
+// permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory].
+//
+// The following operations are related to GetBucketInventoryConfiguration :
+//
+// [DeleteBucketInventoryConfiguration]
+//
+// [ListBucketInventoryConfigurations]
+//
+// [PutBucketInventoryConfiguration]
+//
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketInventoryConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketInventoryConfiguration", params, optFns, c.addOperationGetBucketInventoryConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketInventoryConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketInventoryConfigurationInput struct {
+
+ // The name of the bucket containing the inventory configuration to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketInventoryConfigurationOutput struct {
+
+ // Specifies the inventory configuration.
+ InventoryConfiguration *types.InventoryConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketInventoryConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketInventoryConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketInventoryConfiguration",
+ }
+}
+
+// getGetBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getGetBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
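
The inventory getter follows the same Bucket+Id shape; a final sketch, again with hypothetical names:

package s3examples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printInventoryConfig fetches one inventory configuration by ID.
func printInventoryConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketInventoryConfiguration(ctx,
		&s3.GetBucketInventoryConfigurationInput{
			Bucket: aws.String(bucket),
			Id:     aws.String(id),
		})
	if err != nil {
		return err
	}
	if inv := out.InventoryConfiguration; inv != nil {
		fmt.Printf("id=%s enabled=%t\n", aws.ToString(inv.Id), aws.ToBool(inv.IsEnabled))
		if inv.Schedule != nil {
			// Frequency is Daily or Weekly.
			fmt.Println("frequency:", inv.Schedule.Frequency)
		}
	}
	return nil
}
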
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
new file mode 100644
index 000000000..5803ff5b5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go
@@ -0,0 +1,321 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the lifecycle configuration information set on the bucket. For
+// information about lifecycle configuration, see [Object Lifecycle Management].
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, object size, or any
+// combination of these. Accordingly, this section describes the latest API, which
+// is compatible with the new functionality. The previous version of the API
+// supported filtering based only on an object key name prefix, which is supported
+// for general purpose buckets for backward compatibility. For the related API
+// description, see [GetBucketLifecycle].
+//
+// Lifecycle configurations for directory buckets only support expiring
+// objects and cancelling multipart uploads. Expiration of versioned objects,
+// transitions, and tag filters are not supported.
+//
+// Permissions
+// - General purpose bucket permissions - By default, all Amazon S3 resources
+// are private, including buckets, objects, and related subresources (for example,
+// lifecycle configuration and website configuration). Only the resource owner
+// (that is, the Amazon Web Services account that created it) can access the
+// resource. The resource owner can optionally grant access permissions to others
+// by writing an access policy. For this operation, a user must have the
+// s3:GetLifecycleConfiguration permission.
+//
+// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// - Directory bucket permissions - You must have the
+// s3express:GetLifecycleConfiguration permission in an IAM identity-based policy
+// to use this operation. Cross-account access to this API operation isn't
+// supported. The resource owner can optionally grant access permissions to others
+// by creating a role or user for them as long as they are within the same account
+// as the owner and resource.
+//
+// For more information about directory bucket policies and permissions, see
+// [Authorizing Regional endpoint APIs with IAM] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format
+// https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the
+// Amazon S3 User Guide. For more information about endpoints in Local Zones,
+// see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// - Error code: NoSuchLifecycleConfiguration
+//
+// - Description: The lifecycle configuration does not exist.
+//
+// - HTTP Status Code: 404 Not Found
+//
+// - SOAP Fault Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycleConfiguration :
+//
+// [GetBucketLifecycle]
+//
+// [PutBucketLifecycle]
+//
+// [DeleteBucketLifecycle]
+//
+// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketLifecycleConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketLifecycleConfiguration", params, optFns, c.addOperationGetBucketLifecycleConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketLifecycleConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketLifecycleConfigurationInput struct {
+
+ // The name of the bucket for which to get the lifecycle information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketLifecycleConfigurationOutput struct {
+
+ // Container for a lifecycle rule.
+ Rules []types.LifecycleRule
+
+ // Indicates which default minimum object size behavior is applied to the
+ // lifecycle configuration.
+ //
+ // This parameter applies to general purpose buckets only. It isn't supported for
+ // directory bucket lifecycle configurations.
+ //
+ // - all_storage_classes_128K - Objects smaller than 128 KB will not transition
+ // to any storage class by default.
+ //
+ // - varies_by_storage_class - Objects smaller than 128 KB will transition to
+ // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default,
+ // all other storage classes will prevent transitions smaller than 128 KB.
+ //
+ // To customize the minimum object size for any transition, you can add a filter
+ // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body
+ // of your transition rule. Custom filters always take precedence over the default
+ // transition behavior.
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLifecycleConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketLifecycleConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketLifecycleConfiguration",
+ }
+}
+
+// getGetBucketLifecycleConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLifecycleConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLifecycleConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
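
The generated GetBucketLifecycleConfiguration operation above is called like any
other AWS SDK for Go v2 operation. A minimal usage sketch, assuming default
credential/region resolution; "example-bucket" is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Resolve credentials and region from the environment/shared config.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// A bucket with no lifecycle configuration surfaces the
	// NoSuchLifecycleConfiguration error described in the doc comment.
	out, err := client.GetBucketLifecycleConfiguration(context.TODO(),
		&s3.GetBucketLifecycleConfigurationInput{Bucket: aws.String("example-bucket")})
	if err != nil {
		log.Fatal(err)
	}
	for _, rule := range out.Rules {
		fmt.Println("rule:", aws.ToString(rule.ID), "status:", rule.Status)
	}
	fmt.Println("default minimum transition size:", out.TransitionDefaultMinimumObjectSize)
}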
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
new file mode 100644
index 000000000..3d1397b7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go
@@ -0,0 +1,334 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the Region the bucket resides in. You set the bucket's Region using the
+// LocationConstraint request parameter in a CreateBucket request. For more
+// information, see [CreateBucket].
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For
+// backward compatibility, Amazon S3 continues to support GetBucketLocation.
+//
+// The following operations are related to GetBucketLocation :
+//
+// [GetObject]
+//
+// [CreateBucket]
+//
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html
+func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) {
+ if params == nil {
+ params = &GetBucketLocationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketLocation", params, optFns, c.addOperationGetBucketLocationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketLocationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketLocationInput struct {
+
+ // The name of the bucket for which to get the location.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketLocationOutput struct {
+
+ // Specifies the Region where the bucket resides. For a list of all the Amazon S3
+ // supported location constraints by Region, see [Regions and Endpoints].
+ //
+ // Buckets in Region us-east-1 have a LocationConstraint of null . Buckets with a
+ // LocationConstraint of EU reside in eu-west-1 .
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ LocationConstraint types.BucketLocationConstraint
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLocation{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLocation"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = swapDeserializerHelper(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLocation(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLocation_custom struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLocation_custom) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLocation_custom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata)
+ }
+ output := &GetBucketLocationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, xml.StartElement{})
+ err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder)
+ if err == io.EOF {
+ err = nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return out, metadata, err
+}
+
+// swapDeserializerHelper swaps in the custom GetBucketLocation deserializer defined above.
+func swapDeserializerHelper(stack *middleware.Stack) error {
+ _, err := stack.Deserialize.Swap("OperationDeserializer", &awsRestxml_deserializeOpGetBucketLocation_custom{})
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketLocationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketLocation",
+ }
+}
+
+// getGetBucketLocationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketLocationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLocationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLocationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLocationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
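
Because us-east-1 buckets return an empty LocationConstraint and legacy buckets
return EU (per the doc comment above), callers typically normalize the result.
A sketch under those assumptions; resolveBucketRegion is a hypothetical helper,
not part of the SDK:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// resolveBucketRegion maps the LocationConstraint quirks to a region string.
func resolveBucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	switch out.LocationConstraint {
	case "": // us-east-1 buckets report a null (empty) constraint
		return "us-east-1", nil
	case types.BucketLocationConstraintEu: // legacy alias for eu-west-1
		return "eu-west-1", nil
	default:
		return string(out.LocationConstraint), nil
	}
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	region, err := resolveBucketRegion(context.TODO(), s3.NewFromConfig(cfg), "example-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket region:", region)
}

Note that the doc comment recommends HeadBucket for new code; GetBucketLocation
is retained for backward compatibility.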
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
new file mode 100644
index 000000000..c1f315fab
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go
@@ -0,0 +1,241 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the logging status of a bucket and the permissions users have to view
+// and modify that status.
+//
+// The following operations are related to GetBucketLogging :
+//
+// [CreateBucket]
+//
+// [PutBucketLogging]
+//
+// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) {
+ if params == nil {
+ params = &GetBucketLoggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketLogging", params, optFns, c.addOperationGetBucketLoggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketLoggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketLoggingInput struct {
+
+ // The bucket name for which to get the logging information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketLoggingOutput struct {
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to all
+ // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API
+ // Reference.
+ //
+ // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
+ LoggingEnabled *types.LoggingEnabled
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLogging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketLogging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketLoggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketLogging",
+ }
+}
+
+// getGetBucketLoggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetBucketLoggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketLoggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketLoggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
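
In the output above, a nil LoggingEnabled means server access logging is simply
disabled, which is distinct from a request error, so callers should nil-check
before dereferencing. A short sketch, assuming a client configured as in the
earlier examples:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printLoggingStatus(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	if le := out.LoggingEnabled; le != nil {
		// Access logs are delivered to TargetBucket under TargetPrefix.
		fmt.Printf("logs: s3://%s/%s\n", aws.ToString(le.TargetBucket), aws.ToString(le.TargetPrefix))
	} else {
		fmt.Println("server access logging is disabled")
	}
	return nil
}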
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go
new file mode 100644
index 000000000..5d5e9685f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go
@@ -0,0 +1,242 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Retrieves the metadata table configuration for a general purpose bucket. For
+// more information, see [Accelerating data discovery with S3 Metadata] in the
+// Amazon S3 User Guide.
+//
+// Permissions To use this operation, you must have the
+// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables]
+// in the Amazon S3 User Guide.
+//
+// The following operations are related to GetBucketMetadataTableConfiguration :
+//
+// [CreateBucketMetadataTableConfiguration]
+//
+// [DeleteBucketMetadataTableConfiguration]
+//
+// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html
+// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html
+// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html
+// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html
+func (c *Client) GetBucketMetadataTableConfiguration(ctx context.Context, params *GetBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataTableConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketMetadataTableConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataTableConfiguration", params, optFns, c.addOperationGetBucketMetadataTableConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketMetadataTableConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketMetadataTableConfigurationInput struct {
+
+ // The general purpose bucket that contains the metadata table configuration that
+ // you want to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The expected owner of the general purpose bucket that you want to retrieve the
+ // metadata table configuration from.
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketMetadataTableConfigurationOutput struct {
+
+ // The metadata table configuration for the general purpose bucket.
+ GetBucketMetadataTableConfigurationResult *types.GetBucketMetadataTableConfigurationResult
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataTableConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataTableConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataTableConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketMetadataTableConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketMetadataTableConfiguration",
+ }
+}
+
+// getGetBucketMetadataTableConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketMetadataTableConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketMetadataTableConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
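
A minimal call sketch for the metadata table operation; the nested result type
lives in the types package, so this example prints the whole struct rather than
assuming its field layout:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printMetadataTableConfig(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketMetadataTableConfiguration(ctx,
		&s3.GetBucketMetadataTableConfigurationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	if out.GetBucketMetadataTableConfigurationResult == nil {
		fmt.Println("no metadata table configuration")
		return nil
	}
	fmt.Printf("%+v\n", *out.GetBucketMetadataTableConfigurationResult)
	return nil
}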
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
new file mode 100644
index 000000000..4fdf2ef42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go
@@ -0,0 +1,258 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Gets a metrics configuration (specified by the metrics configuration ID) from
+// the bucket. Note that this doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to GetBucketMetricsConfiguration :
+//
+// [PutBucketMetricsConfiguration]
+//
+// [DeleteBucketMetricsConfiguration]
+//
+// [ListBucketMetricsConfigurations]
+//
+// [Monitoring Metrics with Amazon CloudWatch]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketMetricsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketMetricsConfiguration", params, optFns, c.addOperationGetBucketMetricsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketMetricsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketMetricsConfigurationInput struct {
+
+ // The name of the bucket containing the metrics configuration to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the metrics configuration. The ID has a 64 character
+ // limit and can only contain letters, numbers, periods, dashes, and underscores.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketMetricsConfigurationOutput struct {
+
+ // Specifies the metrics configuration.
+ MetricsConfiguration *types.MetricsConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetricsConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketMetricsConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketMetricsConfiguration",
+ }
+}
+
+// getGetBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketMetricsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
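
Both Bucket and Id are required, and Id must match the identifier used when the
metrics configuration was created with PutBucketMetricsConfiguration. A sketch
with a hypothetical configuration ID ("EntireBucket" is only illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printMetricsConfig(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String("EntireBucket"), // hypothetical metrics configuration ID
	})
	if err != nil {
		return err
	}
	if mc := out.MetricsConfiguration; mc != nil {
		fmt.Println("metrics configuration:", aws.ToString(mc.Id))
	}
	return nil
}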
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
new file mode 100644
index 000000000..1fb7d1643
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go
@@ -0,0 +1,281 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the notification configuration of a bucket.
+//
+// If notifications are not enabled on the bucket, the action returns an empty
+// NotificationConfiguration element.
+//
+// By default, you must be the bucket owner to read the notification configuration
+// of a bucket. However, the bucket owner can use a bucket policy to grant
+// permission to other users to read this configuration with the
+// s3:GetBucketNotification permission.
+//
+// When you use this API operation with an access point, provide the alias of the
+// access point in place of the bucket name.
+//
+// When you use this API operation with an Object Lambda access point, provide the
+// alias of the Object Lambda access point in place of the bucket name. If the
+// Object Lambda access point alias in a request is not valid, the error code
+// InvalidAccessPointAliasError is returned. For more information about
+// InvalidAccessPointAliasError , see [List of Error Codes].
+//
+// For more information about setting and reading the notification configuration
+// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies].
+//
+// The following action is related to GetBucketNotification :
+//
+// [PutBucketNotification]
+//
+// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html
+func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) {
+ if params == nil {
+ params = &GetBucketNotificationConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketNotificationConfiguration", params, optFns, c.addOperationGetBucketNotificationConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketNotificationConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetBucketNotificationConfigurationInput struct {
+
+ // The name of the bucket for which to get the notification configuration.
+ //
+ // When you use this API operation with an access point, provide the alias of the
+ // access point in place of the bucket name.
+ //
+ // When you use this API operation with an Object Lambda access point, provide the
+ // alias of the Object Lambda access point in place of the bucket name. If the
+ // Object Lambda access point alias in a request is not valid, the error code
+ // InvalidAccessPointAliasError is returned. For more information about
+ // InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+// A container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off for the bucket.
+type GetBucketNotificationConfigurationOutput struct {
+
+ // Enables delivery of events to Amazon EventBridge.
+ EventBridgeConfiguration *types.EventBridgeConfiguration
+
+ // Describes the Lambda functions to invoke and the events for which to invoke
+ // them.
+ LambdaFunctionConfigurations []types.LambdaFunctionConfiguration
+
+ // The Amazon Simple Queue Service queues to publish messages to and the events
+ // for which to publish messages.
+ QueueConfigurations []types.QueueConfiguration
+
+ // The topic to which notifications are sent and the events for which
+ // notifications are generated.
+ TopicConfigurations []types.TopicConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketNotificationConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketNotificationConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketNotificationConfiguration",
+ }
+}
+
+// getGetBucketNotificationConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getGetBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketNotificationConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketNotificationConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
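
An output with no queue, topic, or Lambda configurations and a nil
EventBridgeConfiguration corresponds to the empty NotificationConfiguration
element described in the doc comment. A sketch that walks all four notification
targets:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printNotificationConfig(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketNotificationConfiguration(ctx,
		&s3.GetBucketNotificationConfigurationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for _, q := range out.QueueConfigurations {
		fmt.Println("queue:", aws.ToString(q.QueueArn), "events:", q.Events)
	}
	for _, t := range out.TopicConfigurations {
		fmt.Println("topic:", aws.ToString(t.TopicArn), "events:", t.Events)
	}
	for _, f := range out.LambdaFunctionConfigurations {
		fmt.Println("lambda:", aws.ToString(f.LambdaFunctionArn), "events:", f.Events)
	}
	if out.EventBridgeConfiguration != nil {
		fmt.Println("EventBridge delivery is enabled")
	}
	return nil
}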
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
new file mode 100644
index 000000000..cdc07a580
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go
@@ -0,0 +1,241 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
+// must have the s3:GetBucketOwnershipControls permission. For more information
+// about Amazon S3 permissions, see [Specifying permissions in a policy].
+//
+// For information about Amazon S3 Object Ownership, see [Using Object Ownership].
+//
+// The following operations are related to GetBucketOwnershipControls :
+//
+// # PutBucketOwnershipControls
+//
+// # DeleteBucketOwnershipControls
+//
+// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html
+func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &GetBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketOwnershipControls", params, optFns, c.addOperationGetBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
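+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): fetching a
+// bucket's ownership controls. The setup uses the public SDK packages
+// github.com/aws/aws-sdk-go-v2/config and github.com/aws/aws-sdk-go-v2/aws;
+// "example-bucket" is a placeholder name.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := s3.NewFromConfig(cfg)
+//	out, err := client.GetBucketOwnershipControls(context.TODO(), &s3.GetBucketOwnershipControlsInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if out.OwnershipControls != nil {
+//		for _, rule := range out.OwnershipControls.Rules {
+//			fmt.Println(rule.ObjectOwnership) // e.g. BucketOwnerEnforced
+//		}
+//	}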
+
+type GetBucketOwnershipControlsInput struct {
+
+ // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketOwnershipControlsOutput struct {
+
+ // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or
+ // ObjectWriter) currently in effect for this Amazon S3 bucket.
+ OwnershipControls *types.OwnershipControls
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketOwnershipControls"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketOwnershipControlsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketOwnershipControls",
+ }
+}
+
+// getGetBucketOwnershipControlsBucketMember returns a pointer to the provided
+// bucket member value and a boolean indicating whether the input has a modeled
+// bucket name.
+func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketOwnershipControlsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
new file mode 100644
index 000000000..26bd4a775
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go
@@ -0,0 +1,308 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions If you are using an identity other than the root user of the Amazon
+// Web Services account that owns the bucket, the calling identity must both have
+// the GetBucketPolicy permissions on the specified bucket and belong to the
+// bucket owner's account in order to use this operation.
+//
+// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error.
+//
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+// actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing these
+// API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
+//
+// - General purpose bucket permissions - The s3:GetBucketPolicy permission is
+// required in a policy. For more information about bucket policies for general
+// purpose buckets, see [Using Bucket Policies and User Policies] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation, you
+// must have the s3express:GetBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web Services
+// account that owns the resource. For more information about directory bucket
+// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples]
+// in the Amazon S3 User Guide.
+//
+// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following action is related to GetBucketPolicy :
+//
+// [GetObject]
+//
+// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) {
+ if params == nil {
+ params = &GetBucketPolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicy", params, optFns, c.addOperationGetBucketPolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketPolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
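+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): retrieving a
+// bucket policy document, assuming a configured *s3.Client and context as in the
+// sketch above; "example-bucket" is a placeholder.
+//
+//	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if out.Policy != nil {
+//		fmt.Println(*out.Policy) // the policy as a JSON document
+//	}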
+
+type GetBucketPolicyInput struct {
+
+ // The bucket name to get the bucket policy for.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this API operation with an access point, provide
+ // the alias of the access point in place of the bucket name.
+ //
+ // Object Lambda access points - When you use this API operation with an Object
+ // Lambda access point, provide the alias of the Object Lambda access point in
+ // place of the bucket name. If the Object Lambda access point alias in a request
+ // is not valid, the error code InvalidAccessPointAliasError is returned. For more
+ // information about InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketPolicyOutput struct {
+
+ // The bucket policy as a JSON document.
+ Policy *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketPolicyInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketPolicy",
+ }
+}
+
+// getGetBucketPolicyBucketMember returns a pointer to the provided bucket
+// member value and a boolean indicating whether the input has a modeled bucket
+// name.
+func getGetBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
new file mode 100644
index 000000000..db63caf6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go
@@ -0,0 +1,249 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Retrieves the policy status for an Amazon S3 bucket, indicating whether the
+// bucket is public. In order to use this operation, you must have the
+// s3:GetBucketPolicyStatus permission. For more information about Amazon S3
+// permissions, see [Specifying Permissions in a Policy].
+//
+// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"].
+//
+// The following operations are related to GetBucketPolicyStatus :
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [GetPublicAccessBlock]
+//
+// [PutPublicAccessBlock]
+//
+// [DeletePublicAccessBlock]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) {
+ if params == nil {
+ params = &GetBucketPolicyStatusInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketPolicyStatus", params, optFns, c.addOperationGetBucketPolicyStatusMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketPolicyStatusOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
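+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): checking a
+// bucket's policy status, assuming a configured client and context;
+// "example-bucket" is a placeholder.
+//
+//	out, err := client.GetBucketPolicyStatus(ctx, &s3.GetBucketPolicyStatusInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if out.PolicyStatus != nil {
+//		fmt.Printf("policy status: %+v\n", out.PolicyStatus) // includes the IsPublic flag
+//	}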
+
+type GetBucketPolicyStatusInput struct {
+
+ // The name of the Amazon S3 bucket whose policy status you want to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketPolicyStatusOutput struct {
+
+ // The policy status for the specified bucket.
+ PolicyStatus *types.PolicyStatus
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketPolicyStatus{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicyStatus"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketPolicyStatus(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketPolicyStatusInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketPolicyStatus",
+ }
+}
+
+// getGetBucketPolicyStatusBucketMember returns a pointer to the provided
+// bucket member value and a boolean indicating whether the input has a modeled
+// bucket name.
+func getGetBucketPolicyStatusBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketPolicyStatusInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketPolicyStatusUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketPolicyStatusBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
new file mode 100644
index 000000000..ebc0d35cf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go
@@ -0,0 +1,256 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while for a put or delete of a replication configuration to
+// propagate to all Amazon S3 systems. Therefore, a get request made soon after a
+// put or delete can return an outdated result.
+//
+// For information about replication configuration, see [Replication] in the Amazon S3 User
+// Guide.
+//
+// This action requires permissions for the s3:GetReplicationConfiguration action.
+// For more information about permissions, see [Using Bucket Policies and User Policies].
+//
+// If you include the Filter element in a replication configuration, you must also
+// include the DeleteMarkerReplication and Priority elements. The response also
+// returns those elements.
+//
+// For information about GetBucketReplication errors, see [List of replication-related error codes].
+//
+// The following operations are related to GetBucketReplication :
+//
+// [PutBucketReplication]
+//
+// [DeleteBucketReplication]
+//
+// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList
+// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html
+func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) {
+ if params == nil {
+ params = &GetBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketReplication", params, optFns, c.addOperationGetBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
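+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): listing a
+// bucket's replication rules, assuming a configured client and context;
+// "example-bucket" is a placeholder.
+//
+//	out, err := client.GetBucketReplication(ctx, &s3.GetBucketReplicationInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err) // note: a recently written configuration may not have propagated yet
+//	}
+//	if out.ReplicationConfiguration != nil {
+//		fmt.Printf("%d replication rule(s)\n", len(out.ReplicationConfiguration.Rules))
+//	}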
+
+type GetBucketReplicationInput struct {
+
+ // The bucket name for which to get the replication information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketReplicationOutput struct {
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ ReplicationConfiguration *types.ReplicationConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketReplication"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketReplicationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketReplication",
+ }
+}
+
+// getGetBucketReplicationBucketMember returns a pointer to the provided
+// bucket member value and a boolean indicating whether the input has a modeled
+// bucket name.
+func getGetBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
new file mode 100644
index 000000000..34563cb0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go
@@ -0,0 +1,235 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the request payment configuration of a bucket. To use this version of
+// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets].
+//
+// The following operations are related to GetBucketRequestPayment :
+//
+// [ListObjects]
+//
+// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) {
+ if params == nil {
+ params = &GetBucketRequestPaymentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketRequestPayment", params, optFns, c.addOperationGetBucketRequestPaymentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketRequestPaymentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
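+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): reading who
+// pays for downloads and requests on a bucket, assuming a configured client and
+// context; "example-bucket" is a placeholder.
+//
+//	out, err := client.GetBucketRequestPayment(ctx, &s3.GetBucketRequestPaymentInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(out.Payer) // "BucketOwner" or "Requester"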
+
+type GetBucketRequestPaymentInput struct {
+
+ // The name of the bucket for which to get the request payment configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketRequestPaymentOutput struct {
+
+ // Specifies who pays for the download and request fees.
+ Payer types.Payer
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketRequestPayment"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketRequestPayment(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketRequestPaymentInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketRequestPayment",
+ }
+}
+
+// getGetBucketRequestPaymentBucketMember returns a pointer to the provided
+// bucket member value and a boolean indicating whether the input has a modeled
+// bucket name.
+func getGetBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketRequestPaymentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketRequestPaymentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
new file mode 100644
index 000000000..a1d8d99c4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go
@@ -0,0 +1,248 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the tag set associated with the bucket.
+//
+// To use this operation, you must have permission to perform the
+// s3:GetBucketTagging action. By default, the bucket owner has this permission and
+// can grant this permission to others.
+//
+// GetBucketTagging has the following special error:
+//
+// - Error code: NoSuchTagSet
+//
+// - Description: There is no tag set associated with the bucket.
+//
+// The following operations are related to GetBucketTagging :
+//
+// [PutBucketTagging]
+//
+// [DeleteBucketTagging]
+//
+// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html
+// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
+func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) {
+ if params == nil {
+ params = &GetBucketTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketTagging", params, optFns, c.addOperationGetBucketTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
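+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): printing a
+// bucket's tag set, assuming a configured client and context; "example-bucket"
+// is a placeholder. A bucket without tags returns the NoSuchTagSet error noted above.
+//
+//	out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, tag := range out.TagSet {
+//		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
+//	}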
+
+type GetBucketTaggingInput struct {
+
+ // The name of the bucket for which to get the tagging information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketTaggingOutput struct {
+
+ // Contains the tag set.
+ //
+ // This member is required.
+ TagSet []types.Tag
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketTagging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketTaggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketTagging",
+ }
+}
+
+// getGetBucketTaggingBucketMember returns a pointer to the provided bucket
+// member value and a boolean indicating whether the input has a modeled bucket
+// name.
+func getGetBucketTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
new file mode 100644
index 000000000..6e69fd467
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go
@@ -0,0 +1,250 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the versioning state of a bucket.
+//
+// To retrieve the versioning state of a bucket, you must be the bucket owner.
+//
+// This implementation also returns the MFA Delete status of the versioning state.
+// If the MFA Delete status is enabled , the bucket owner must use an
+// authentication device to change the versioning state of the bucket.
+//
+// The following operations are related to GetBucketVersioning :
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [DeleteObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) {
+ if params == nil {
+ params = &GetBucketVersioningInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketVersioning", params, optFns, c.addOperationGetBucketVersioningMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketVersioningOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
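+
+// Example (illustrative sketch, not emitted by smithy-go-codegen): inspecting a
+// bucket's versioning state, assuming a configured client and context;
+// "example-bucket" is a placeholder. The types package is
+// github.com/aws/aws-sdk-go-v2/service/s3/types.
+//
+//	out, err := client.GetBucketVersioning(ctx, &s3.GetBucketVersioningInput{
+//		Bucket: aws.String("example-bucket"), // placeholder bucket name
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if out.Status == types.BucketVersioningStatusEnabled {
+//		fmt.Println("versioning enabled; MFA delete:", out.MFADelete)
+//	}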
+
+type GetBucketVersioningInput struct {
+
+ // The name of the bucket for which to get the versioning information.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketVersioningOutput struct {
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA delete.
+ // If the bucket has never been so configured, this element is not returned.
+ MFADelete types.MFADeleteStatus
+
+ // The versioning state of the bucket.
+ Status types.BucketVersioningStatus
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketVersioning"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketVersioning(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketVersioningInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketVersioning",
+ }
+}
+
+// getGetBucketVersioningBucketMember returns a pointer to the provided bucket
+// member value and a boolean indicating whether the input has a modeled bucket
+// name.
+func getGetBucketVersioningBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketVersioningInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketVersioningBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
new file mode 100644
index 000000000..ce0847c8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go
@@ -0,0 +1,254 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration. For
+// more information about hosting websites, see [Hosting Websites on Amazon S3].
+//
+// This GET action requires the S3:GetBucketWebsite permission. By default, only
+// the bucket owner can read the bucket website configuration. However, bucket
+// owners can allow other users to read the website configuration by writing a
+// bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite:
+//
+// [DeleteBucketWebsite]
+//
+// [PutBucketWebsite]
+//
+// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html
+// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html
+func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &GetBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetBucketWebsite", params, optFns, c.addOperationGetBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
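+
+// Illustrative sketch (editor-added, not generated code): reading a bucket's
+// website configuration, assuming a hypothetical *Client named client and a
+// context named ctx:
+//
+//	out, err := client.GetBucketWebsite(ctx, &s3.GetBucketWebsiteInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical bucket name
+//	})
+//	if err == nil && out.IndexDocument != nil {
+//		fmt.Println(*out.IndexDocument.Suffix) // e.g. "index.html"
+//	}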
+
+type GetBucketWebsiteInput struct {
+
+ // The bucket name for which to get the website configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetBucketWebsiteOutput struct {
+
+ // The object key name of the website error document to use for 4XX class errors.
+ ErrorDocument *types.ErrorDocument
+
+ // The name of the index document for the website (for example, index.html).
+ IndexDocument *types.IndexDocument
+
+ // Specifies the redirect behavior of all requests to a website endpoint of an
+ // Amazon S3 bucket.
+ RedirectAllRequestsTo *types.RedirectAllRequestsTo
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []types.RoutingRule
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketWebsite"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetBucketWebsiteInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetBucketWebsite",
+ }
+}
+
+// getGetBucketWebsiteBucketMember returns a pointer to the provided bucket
+// member value and a boolean indicating whether the input has a modeled bucket
+// name.
+func getGetBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
new file mode 100644
index 000000000..6fdc8ec9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go
@@ -0,0 +1,883 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "time"
+)
+
+// Retrieves an object from Amazon S3.
+//
+// In the GetObject request, specify the full key name for the object.
+//
+// General purpose buckets - Both the virtual-hosted-style requests and the
+// path-style requests are supported. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg , specify the object key
+// name as /photos/2006/February/sample.jpg . For a path-style request example, if
+// you have the object photos/2006/February/sample.jpg in the bucket named
+// examplebucket , specify the object key name as
+// /examplebucket/photos/2006/February/sample.jpg . For more information about
+// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide.
+//
+// Directory buckets - Only virtual-hosted-style requests are supported. For a
+// virtual hosted-style request example, if you have the object
+// photos/2006/February/sample.jpg in the bucket named
+// amzn-s3-demo-bucket--usw2-az1--x-s3 , specify the object key name as
+// /photos/2006/February/sample.jpg . Also, when you make requests to this API
+// operation, your requests are sent to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name .
+// Path-style requests are not supported. For more information about endpoints in
+// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about
+// endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+// - General purpose bucket permissions - You must have the required permissions
+// in a policy. To use GetObject , you must have the READ access to the object
+// (or version). If you grant READ access to the anonymous user, the GetObject
+// operation returns the object without using an authorization header. For more
+// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide.
+//
+// If you include a versionId in your request header, you must have the
+//
+// s3:GetObjectVersion permission to access a specific version of an object. The
+// s3:GetObject permission is not required in this scenario.
+//
+// If you request the current version of an object without a specific versionId in
+//
+// the request header, only the s3:GetObject permission is required. The
+// s3:GetObjectVersion permission is not required in this scenario.
+//
+// If the object that you request doesn’t exist, the error that Amazon S3 returns
+//
+// depends on whether you also have the s3:ListBucket permission.
+//
+// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+// HTTP status code 404 Not Found error.
+//
+// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+// status code 403 Access Denied error.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession].
+//
+// If the object is encrypted using SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+// Storage classes If the object you are retrieving is stored in the S3 Glacier
+// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the
+// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep
+// Archive Access tier, before you can retrieve the object you must first restore a
+// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For
+// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, only the S3 Express One Zone storage
+// class is supported to store newly created objects. Unsupported storage class
+// values won't write a destination object and will respond with the HTTP status
+// code 400 Bad Request .
+//
+// Encryption Encryption request headers, like x-amz-server-side-encryption ,
+// should not be sent for the GetObject requests, if your object uses server-side
+// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side
+// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer
+// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you
+// include the header in your GetObject requests for the object that uses these
+// types of keys, you’ll get an HTTP 400 Bad Request error.
+//
+// Directory buckets - For directory buckets, there are only two supported options
+// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more
+// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide.
+//
+// Overriding response header values through the request There are times when you
+// want to override certain response header values of a GetObject response. For
+// example, you might override the Content-Disposition response header value
+// through your GetObject request.
+//
+// You can override values for a set of response headers. These modified response
+// header values are included only in a successful response, that is, when the HTTP
+// status code 200 OK is returned. The headers you can override using the
+// following query parameters in the request are a subset of the headers that
+// Amazon S3 accepts when you create an object.
+//
+// The response headers that you can override for the GetObject response are
+// Cache-Control, Content-Disposition, Content-Encoding, Content-Language,
+// Content-Type, and Expires.
+//
+// To override values for a set of response headers in the GetObject response, you
+// can use the following query parameters in the request.
+//
+// - response-cache-control
+//
+// - response-content-disposition
+//
+// - response-content-encoding
+//
+// - response-content-language
+//
+// - response-content-type
+//
+// - response-expires
+//
+// When you use these parameters, you must sign the request by using either an
+// Authorization header or a presigned URL. These parameters cannot be used with an
+// unsigned (anonymous) request.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to GetObject:
+//
+// [ListBuckets]
+//
+// [GetObjectAcl]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket
+// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html
+// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) {
+ if params == nil {
+ params = &GetObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObject", params, optFns, c.addOperationGetObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
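+
+// Illustrative sketch (editor-added, not generated code): a minimal download
+// that reads and closes the response body. client, ctx, and the bucket/key
+// literals are hypothetical:
+//
+//	out, err := client.GetObject(ctx, &s3.GetObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("photos/2006/February/sample.jpg"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer out.Body.Close()
+//	data, err := io.ReadAll(out.Body) // stream instead for large objects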
+
+type GetObjectInput struct {
+
+ // The bucket name containing the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Object Lambda access points - When you use this action with an Object Lambda
+ // access point, you must direct requests to the Object Lambda access point
+ // hostname. The Object Lambda access point hostname takes the form
+ // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key of the object to get.
+ //
+ // This member is required.
+ Key *string
+
+ // To retrieve the checksum, this mode must be enabled.
+ ChecksumMode types.ChecksumMode
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Return the object only if its entity tag (ETag) is the same as the one
+ // specified in this header; otherwise, return a 412 Precondition Failed error.
+ //
+ // If both the If-Match and If-Unmodified-Since headers are present in the
+ // request, and the If-Match condition evaluates to true while the
+ // If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK
+ // and the requested data.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+ // Return the object only if it has been modified since the specified time;
+ // otherwise, return a 304 Not Modified error.
+ //
+ // If both the If-None-Match and If-Modified-Since headers are present in the
+ // request, and the If-None-Match condition evaluates to false while the
+ // If-Modified-Since condition evaluates to true, then S3 returns a 304 Not
+ // Modified status code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfModifiedSince *time.Time
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified in this header; otherwise, return a 304 Not Modified error.
+ //
+ // If both the If-None-Match and If-Modified-Since headers are present in the
+ // request, and the If-None-Match condition evaluates to false while the
+ // If-Modified-Since condition evaluates to true, then S3 returns a 304 Not
+ // Modified HTTP status code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfNoneMatch *string
+
+ // Return the object only if it has not been modified since the specified time;
+ // otherwise, return a 412 Precondition Failed error.
+ //
+ // If both the If-Match and If-Unmodified-Since headers are present in the
+ // request, and the If-Match condition evaluates to true while the
+ // If-Unmodified-Since condition evaluates to false, then S3 returns 200 OK
+ // and the requested data.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfUnmodifiedSince *time.Time
+
+ // Part number of the object being read. This is a positive integer between 1 and
+ // 10,000. Effectively performs a 'ranged' GET request for the part specified.
+ // Useful for downloading just a part of an object.
+ PartNumber *int32
+
+ // Downloads the specified byte range of an object. For more information about the
+ // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range].
+ //
+ // Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range
+ Range *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time
+
+ // Specifies the algorithm to use when decrypting the object (for example, AES256).
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ // GET the object, you must use the following headers:
+ //
+ // - x-amz-server-side-encryption-customer-algorithm
+ //
+ // - x-amz-server-side-encryption-customer-key
+ //
+ // - x-amz-server-side-encryption-customer-key-MD5
+ //
+ // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key that you originally provided for
+ // Amazon S3 to encrypt the data before storing it. This value is used to decrypt
+ // the object when recovering it and must match the one used when storing the data.
+ // The key must be appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ // GET the object, you must use the following headers:
+ //
+ // - x-amz-server-side-encryption-customer-algorithm
+ //
+ // - x-amz-server-side-encryption-customer-key
+ //
+ // - x-amz-server-side-encryption-customer-key-MD5
+ //
+ // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the customer-provided encryption key
+ // according to RFC 1321. Amazon S3 uses this header for a message integrity check
+ // to ensure that the encryption key was transmitted without error.
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+ // GET the object, you must use the following headers:
+ //
+ // - x-amz-server-side-encryption-customer-algorithm
+ //
+ // - x-amz-server-side-encryption-customer-key
+ //
+ // - x-amz-server-side-encryption-customer-key-MD5
+ //
+ // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKeyMD5 *string
+
+ // Version ID used to reference a specific version of the object.
+ //
+ // By default, the GetObject operation returns the current version of an object.
+ // To return a different version, use the versionId subresource.
+ //
+ // - If you include a versionId in your request header, you must have the
+ // s3:GetObjectVersion permission to access a specific version of an object. The
+ // s3:GetObject permission is not required in this scenario.
+ //
+ // - If you request the current version of an object without a specific versionId
+ // in the request header, only the s3:GetObject permission is required. The
+ // s3:GetObjectVersion permission is not required in this scenario.
+ //
+ // - Directory buckets - S3 Versioning isn't enabled and supported for directory
+ // buckets. For this API operation, only the null value of the version ID is
+ // supported by directory buckets. You can only specify null to the versionId
+ // query parameter in the request.
+ //
+ // For more information about versioning, see [PutBucketVersioning].
+ //
+ // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
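+
+// Illustrative sketch (editor-added, not generated code): combining the
+// optional members above for a conditional, ranged read. All literal values,
+// including the ETag, are hypothetical:
+//
+//	in := &s3.GetObjectInput{
+//		Bucket:      aws.String("amzn-s3-demo-bucket"),
+//		Key:         aws.String("photos/2006/February/sample.jpg"),
+//		Range:       aws.String("bytes=0-1023"), // first 1 KiB only
+//		IfNoneMatch: aws.String(`"9b2cf535f27731c974343645a3985328"`), // quoted ETag
+//	}
+//	// If the ETag still matches, the call returns a 304 Not Modified error
+//	// instead of object data.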
+
+func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type GetObjectOutput struct {
+
+ // Indicates that a range of bytes was specified in the request.
+ AcceptRanges *string
+
+ // Object data.
+ Body io.ReadCloser
+
+ // Indicates whether the object uses an S3 Bucket Key for server-side encryption
+ // with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is
+ // only present if it was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more
+ // information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+ // present if it was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // The checksum type, which determines how part-level checksums are combined to
+ // create an object-level checksum for multipart objects. You can use this header
+ // response to verify that the checksum type that is received is the same checksum
+ // type that was specified in the CreateMultipartUpload request. For more
+ // information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Indicates what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // Size of the body in bytes.
+ ContentLength *int64
+
+ // The portion of the object returned in the response.
+ ContentRange *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Indicates whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ //
+ // - If the current version of the object is a delete marker, Amazon S3 behaves
+ // as if the object was deleted and includes x-amz-delete-marker: true in the
+ // response.
+ //
+ // - If the specified version in the request is a delete marker, the response
+ // returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ // response header.
+ DeleteMarker *bool
+
+ // An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ // specific version of a resource found at a URL.
+ ETag *string
+
+ // If the object expiration is configured (see [PutBucketLifecycleConfiguration]),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // Object expiration information is not returned in directory buckets, and this
+ // header returns the value "NotImplemented" in all responses for directory
+ // buckets.
+ //
+ // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+ Expiration *string
+
+ // The date and time at which the object is no longer cacheable.
+ //
+ // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using
+ // the ExpiresString field which contains the unparsed value from the service
+ // response.
+ Expires *time.Time
+
+ // The unparsed value of the Expires field from the service response. Prefer use
+ // of this value over the normal Expires response field where possible.
+ ExpiresString *string
+
+ // Date and time when the object was last modified.
+ //
+ // General purpose buckets - When you specify a versionId of the object in your
+ // request, if the specified version in the request is a delete marker, the
+ // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ // response header.
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ //
+ // Map keys will be normalized to lower-case.
+ Metadata map[string]string
+
+ // This is set to the number of metadata entries not returned in the headers that
+ // are prefixed with x-amz-meta- . This can happen if you create metadata using an
+ // API like SOAP that supports more flexible metadata than the REST API. For
+ // example, using SOAP, you can create metadata whose values are not legal HTTP
+ // headers.
+ //
+ // This functionality is not supported for directory buckets.
+ MissingMeta *int32
+
+ // Indicates whether this object has an active legal hold. This field is only
+ // returned if you have permission to view an object's legal hold status.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode that's currently in place for this object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when this object's Object Lock will expire.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has. This value is only returned if you specify
+ // partNumber in your request and the object was uploaded as a multipart upload.
+ PartsCount *int32
+
+ // Amazon S3 can return this if your request involves a bucket that is either a
+ // source or destination in a replication rule.
+ //
+ // This functionality is not supported for directory buckets.
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Provides information about object restoration action and expiration time of the
+ // restored object copy.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
+ Restore *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3.
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
+ StorageClass types.StorageClass
+
+ // The number of tags, if any, on the object, when you have the relevant
+ // permission to read object tags.
+ //
+ // You can use [GetObjectTagging] to retrieve the tag set associated with an object.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+ TagCount *int32
+
+ // Version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
+ WebsiteRedirectLocation *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObject"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addResponseChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addGetObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObject",
+ }
+}
+
+// getGetObjectRequestValidationModeMember gets the request checksum validation
+// mode provided as input.
+func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) {
+ in := input.(*GetObjectInput)
+ if len(in.ChecksumMode) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumMode), true
+}
+
+func setGetObjectRequestValidationModeMember(input interface{}, mode string) {
+ in := input.(*GetObjectInput)
+ in.ChecksumMode = types.ChecksumMode(mode)
+}
+
+func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{
+ GetValidationMode: getGetObjectRequestValidationModeMember,
+ SetValidationMode: setGetObjectRequestValidationModeMember,
+ ResponseChecksumValidation: options.ResponseChecksumValidation,
+ ValidationAlgorithms: []string{"CRC64NVME", "CRC32", "CRC32C", "SHA256", "SHA1"},
+ IgnoreMultipartValidation: true,
+ LogValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped,
+ LogMultipartValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped,
+ })
+}
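+
+// Illustrative sketch (editor-added, not generated code): opting in to
+// response checksum validation so the output middleware added above verifies
+// the body as it is read. client, ctx, and the literals are hypothetical:
+//
+//	out, err := client.GetObject(ctx, &s3.GetObjectInput{
+//		Bucket:       aws.String("amzn-s3-demo-bucket"),
+//		Key:          aws.String("photos/2006/February/sample.jpg"),
+//		ChecksumMode: types.ChecksumModeEnabled,
+//	})
+//	// Multipart-level checksums are skipped per IgnoreMultipartValidation,
+//	// and a log entry notes the skip unless that logging is disabled.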
+
+// getGetObjectBucketMember returns a pointer to the provided bucket member
+// value and a boolean indicating whether the input has a modeled bucket name.
+func getGetObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignGetObject is used to generate a presigned HTTP request that contains
+// the presigned URL, the signed headers, and the HTTP method used.
+func (c *PresignClient) PresignGetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &GetObjectInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "GetObject", params, clientOptFns,
+ c.client.addOperationGetObjectMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ addGetObjectPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
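+
+// Illustrative sketch (editor-added, not generated code): producing a
+// time-limited presigned GET URL. client, ctx, the bucket/key, and the
+// 15-minute expiry are hypothetical:
+//
+//	presigner := s3.NewPresignClient(client)
+//	req, err := presigner.PresignGetObject(ctx, &s3.GetObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("photos/2006/February/sample.jpg"),
+//	}, func(o *s3.PresignOptions) { o.Expires = 15 * time.Minute })
+//	if err == nil {
+//		fmt.Println(req.Method, req.URL) // share req.URL with the downloader
+//	}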
+
+func addGetObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
new file mode 100644
index 000000000..03db6fb83
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
@@ -0,0 +1,299 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the access control list (ACL) of an object. To use this operation, you
+// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For
+// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId
+// subresource.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// requests to read ACLs are still supported and return the
+// bucket-owner-full-control ACL with the owner being the account that created the
+// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide.
+//
+// The following operations are related to GetObjectAcl:
+//
+// [GetObject]
+//
+// [GetObjectAttributes]
+//
+// [DeleteObject]
+//
+// [PutObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) {
+ if params == nil {
+ params = &GetObjectAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectAcl", params, optFns, c.addOperationGetObjectAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
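+
+// Illustrative sketch (editor-added, not generated code): listing an object's
+// grants. client, ctx, and the bucket/key literals are hypothetical:
+//
+//	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("photos/2006/February/sample.jpg"),
+//	})
+//	if err == nil {
+//		for _, g := range out.Grants {
+//			fmt.Println(g.Permission) // e.g. FULL_CONTROL
+//		}
+//	}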
+
+type GetObjectAclInput struct {
+
+ // The bucket name that contains the object for which to get the ACL information.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key of the object for which to get the ACL information.
+ //
+ // This member is required.
+ Key *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Version ID used to reference a specific version of the object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type GetObjectAclOutput struct {
+
+ // A list of grants.
+ Grants []types.Grant
+
+ // Container for the bucket owner's display name and ID.
+ Owner *types.Owner
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAcl"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectAclInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectAcl",
+ }
+}
+
+// getGetObjectAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetObjectAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go
new file mode 100644
index 000000000..03c7a3929
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go
@@ -0,0 +1,508 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Retrieves all the metadata from an object without returning the object itself.
+// This operation is useful if you're interested only in an object's metadata.
+//
+// GetObjectAttributes combines the functionality of HeadObject and ListParts . All
+// of the data returned with each of those individual calls can be returned with a
+// single call to GetObjectAttributes .
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use GetObjectAttributes , you must
+// have READ access to the object. The permissions that you need to use this
+// operation depend on whether the bucket is versioned. If the bucket is versioned,
+// you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes
+// permissions for this operation. If the bucket is not versioned, you need the
+// s3:GetObject and s3:GetObjectAttributes permissions. For more information, see [Specifying Permissions in a Policy]
+// in the Amazon S3 User Guide. If the object that you request does not exist, the
+// error Amazon S3 returns depends on whether you also have the s3:ListBucket
+// permission.
+//
+// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+// HTTP status code 404 Not Found ("no such key") error.
+//
+// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP
+// status code 403 Forbidden ("access denied") error.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI or SDKs create a session and
+// refresh the session token automatically to avoid service interruptions when
+// a session expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// If the object is encrypted with SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+// Encryption Encryption request headers, like x-amz-server-side-encryption ,
+// should not be sent for HEAD requests if your object uses server-side encryption
+// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side
+// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side
+// encryption with Amazon S3 managed encryption keys (SSE-S3). The
+// x-amz-server-side-encryption header is used when you PUT an object to S3 and
+// want to specify the encryption method. If you include this header in a GET
+// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad
+// Request error. This is because the encryption method can't be changed when
+// you retrieve the object.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+// retrieve the metadata from the object, you must use the following headers to
+// provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
+//
+// - x-amz-server-side-encryption-customer-algorithm
+//
+// - x-amz-server-side-encryption-customer-key
+//
+// - x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+//
+// Directory bucket permissions - For directory buckets, there are only two
+// supported options for server-side encryption: server-side encryption with Amazon
+// S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses
+// the desired encryption configuration and you don't override the bucket default
+// encryption in your CreateSession requests or PUT object requests. Then, new
+// objects are automatically encrypted with the desired encryption settings. For
+// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about
+// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+//
+// Versioning Directory buckets - S3 Versioning isn't enabled or supported for
+// directory buckets. For this API operation, only the null value of the version
+// ID is supported by directory buckets. You can only specify null to the versionId
+// query parameter in the request.
+//
+// Conditional request headers Consider the following when using request headers:
+//
+// - If both of the If-Match and If-Unmodified-Since headers are present in the
+// request as follows, then Amazon S3 returns the HTTP status code 200 OK and the
+// data requested:
+//
+// - If-Match condition evaluates to true .
+//
+// - If-Unmodified-Since condition evaluates to false .
+//
+// For more information about conditional requests, see [RFC 7232].
+//
+// - If both of the If-None-Match and If-Modified-Since headers are present in
+// the request as follows, then Amazon S3 returns the HTTP status code 304 Not
+// Modified :
+//
+// - If-None-Match condition evaluates to false .
+//
+// - If-Modified-Since condition evaluates to true .
+//
+// For more information about conditional requests, see [RFC 7232].
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following actions are related to GetObjectAttributes :
+//
+// [GetObject]
+//
+// [GetObjectAcl]
+//
+// [GetObjectLegalHold]
+//
+// [GetObjectLockConfiguration]
+//
+// [GetObjectRetention]
+//
+// [GetObjectTagging]
+//
+// [HeadObject]
+//
+// [ListParts]
+//
+// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [RFC 7232]: https://tools.ietf.org/html/rfc7232
+// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html
+// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html
+// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) {
+ if params == nil {
+ params = &GetObjectAttributesInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectAttributesOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
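+
+// Usage sketch (editorial illustration, not generated code): requesting only
+// the ETag and object size attributes. Bucket and key are placeholders, and an
+// existing *Client named client is assumed.
+//
+//	attrs, err := client.GetObjectAttributes(context.TODO(), &s3.GetObjectAttributesInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("example.txt"),
+//		ObjectAttributes: []types.ObjectAttributes{
+//			types.ObjectAttributesEtag,
+//			types.ObjectAttributesObjectSize,
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(aws.ToString(attrs.ETag), aws.ToInt64(attrs.ObjectSize))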
+
+type GetObjectAttributesInput struct {
+
+ // The name of the bucket that contains the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Specifies the fields at the root level that you want returned in the response.
+ // Fields that you do not specify are not returned.
+ //
+ // This member is required.
+ ObjectAttributes []types.ObjectAttributes
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int32
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // The version ID used to reference a specific version of the object.
+ //
+// S3 Versioning isn't enabled or supported for directory buckets. For this API
+ // operation, only the null value of the version ID is supported by directory
+ // buckets. You can only specify null to the versionId query parameter in the
+ // request.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type GetObjectAttributesOutput struct {
+
+ // The checksum or digest of the object.
+ Checksum *types.Checksum
+
+ // Specifies whether the object retrieved was ( true ) or was not ( false ) a
+ // delete marker. If false , this response header does not appear in the response.
+ // To learn more about delete markers, see [Working with delete markers].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html
+ DeleteMarker *bool
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time
+
+ // A collection of parts associated with a multipart upload.
+ ObjectParts *types.GetObjectAttributesParts
+
+ // The size of the object in bytes.
+ ObjectSize *int64
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Provides the storage class information of the object. Amazon S3 returns this
+ // header for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see [Storage Classes].
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ StorageClass types.StorageClass
+
+ // The version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAttributes"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectAttributesInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectAttributes",
+ }
+}
+
+// getGetObjectAttributesBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectAttributesInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectAttributesBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
new file mode 100644
index 000000000..459702e8d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go
@@ -0,0 +1,267 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Gets an object's current legal hold status. For more information, see [Locking Objects].
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// The following action is related to GetObjectLegalHold :
+//
+// [GetObjectAttributes]
+//
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) {
+ if params == nil {
+ params = &GetObjectLegalHoldInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectLegalHold", params, optFns, c.addOperationGetObjectLegalHoldMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectLegalHoldOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
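+
+// Usage sketch (editorial illustration, not generated code): reading the legal
+// hold status of a specific object version. Bucket, key, and version ID are
+// placeholders, and an existing *Client named client is assumed.
+//
+//	hold, err := client.GetObjectLegalHold(context.TODO(), &s3.GetObjectLegalHoldInput{
+//		Bucket:    aws.String("amzn-s3-demo-bucket"),
+//		Key:       aws.String("example.txt"),
+//		VersionId: aws.String("object-version-id"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if hold.LegalHold != nil {
+//		fmt.Println(hold.LegalHold.Status)
+//	}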
+
+type GetObjectLegalHoldInput struct {
+
+ // The bucket name containing the object whose legal hold status you want to
+ // retrieve.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object whose legal hold status you want to retrieve.
+ //
+ // This member is required.
+ Key *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The version ID of the object whose legal hold status you want to retrieve.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type GetObjectLegalHoldOutput struct {
+
+ // The current legal hold status for the specified object.
+ LegalHold *types.ObjectLockLegalHold
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLegalHold"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLegalHold(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectLegalHoldInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectLegalHold",
+ }
+}
+
+// getGetObjectLegalHoldBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetObjectLegalHoldBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectLegalHoldInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectLegalHoldBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
new file mode 100644
index 000000000..f49a4458c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go
@@ -0,0 +1,246 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Gets the Object Lock configuration for a bucket. The rule specified in the
+// Object Lock configuration will be applied by default to every new object placed
+// in the specified bucket. For more information, see [Locking Objects].
+//
+// The following action is related to GetObjectLockConfiguration :
+//
+// [GetObjectAttributes]
+//
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) {
+ if params == nil {
+ params = &GetObjectLockConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectLockConfiguration", params, optFns, c.addOperationGetObjectLockConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectLockConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
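+
+// Usage sketch (editorial illustration, not generated code): inspecting a
+// bucket's Object Lock configuration and its default retention rule, if any.
+// The bucket name is a placeholder, and an existing *Client named client is
+// assumed.
+//
+//	out, err := client.GetObjectLockConfiguration(context.TODO(), &s3.GetObjectLockConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	lock := out.ObjectLockConfiguration
+//	if lock != nil && lock.Rule != nil && lock.Rule.DefaultRetention != nil {
+//		r := lock.Rule.DefaultRetention
+//		fmt.Println(r.Mode, aws.ToInt32(r.Days))
+//	}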
+
+type GetObjectLockConfigurationInput struct {
+
+ // The bucket whose Object Lock configuration you want to retrieve.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type GetObjectLockConfigurationOutput struct {
+
+ // The specified bucket's Object Lock configuration.
+ ObjectLockConfiguration *types.ObjectLockConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLockConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectLockConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectLockConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectLockConfiguration",
+ }
+}
+
+// getGetObjectLockConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectLockConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectLockConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
new file mode 100644
index 000000000..bd2d0aaa1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go
@@ -0,0 +1,267 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Retrieves an object's retention settings. For more information, see [Locking Objects].
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// The following action is related to GetObjectRetention :
+//
+// [GetObjectAttributes]
+//
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) {
+ if params == nil {
+ params = &GetObjectRetentionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectRetention", params, optFns, c.addOperationGetObjectRetentionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectRetentionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
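+
+// Usage sketch (editorial illustration, not generated code): reading an
+// object's retention mode and retain-until date. Bucket and key are
+// placeholders, and an existing *Client named client is assumed.
+//
+//	out, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("example.txt"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if out.Retention != nil {
+//		fmt.Println(out.Retention.Mode, aws.ToTime(out.Retention.RetainUntilDate))
+//	}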
+
+type GetObjectRetentionInput struct {
+
+ // The bucket name containing the object whose retention settings you want to
+ // retrieve.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object whose retention settings you want to retrieve.
+ //
+ // This member is required.
+ Key *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The version ID for the object whose retention settings you want to retrieve.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type GetObjectRetentionOutput struct {
+
+ // The container element for an object's retention settings.
+ Retention *types.ObjectLockRetention
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectRetention"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectRetention(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectRetentionInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectRetention",
+ }
+}
+
+// getGetObjectRetentionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getGetObjectRetentionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectRetentionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectRetentionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
new file mode 100644
index 000000000..204a15793
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go
@@ -0,0 +1,296 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns the tag-set of an object. You send the GET request against the tagging
+// subresource associated with the object.
+//
+// To use this operation, you must have permission to perform the
+// s3:GetObjectTagging action. By default, the GET action returns information
+// about the current version of an object. For a versioned bucket, you can have
+// multiple versions of an object in your bucket. To retrieve tags of any other
+// version, use
+// the versionId query parameter. You also need permission for the
+// s3:GetObjectVersionTagging action.
+//
+// By default, the bucket owner has this permission and can grant this permission
+// to others.
+//
+// For information about the Amazon S3 object tagging feature, see [Object Tagging].
+//
+// The following actions are related to GetObjectTagging :
+//
+// [DeleteObjectTagging]
+//
+// [GetObjectAttributes]
+//
+// [PutObjectTagging]
+//
+// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html
+func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) {
+ if params == nil {
+ params = &GetObjectTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectTagging", params, optFns, c.addOperationGetObjectTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
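+
+// Usage sketch (editorial illustration, not generated code): listing the tag
+// set of the current object version. Bucket and key are placeholders, and an
+// existing *Client named client is assumed.
+//
+//	out, err := client.GetObjectTagging(context.TODO(), &s3.GetObjectTaggingInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("example.txt"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, t := range out.TagSet {
+//		fmt.Printf("%s=%s\n", aws.ToString(t.Key), aws.ToString(t.Value))
+//	}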
+
+type GetObjectTaggingInput struct {
+
+ // The bucket name containing the object for which to get the tagging information.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+	// S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which to get the tagging information.
+ //
+ // This member is required.
+ Key *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The versionId of the object for which to get the tagging information.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type GetObjectTaggingOutput struct {
+
+ // Contains the tag set.
+ //
+ // This member is required.
+ TagSet []types.Tag
+
+ // The versionId of the object for which you got the tagging information.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTagging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectTaggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectTagging",
+ }
+}
+
+// getGetObjectTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
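
As a usage illustration for the operation added above: a minimal sketch of reading an object's tag set with the generated client. The bucket and key names are hypothetical, and credentials are assumed to resolve through the SDK's default configuration chain.

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/s3"
	)

	func main() {
		cfg, err := config.LoadDefaultConfig(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		client := s3.NewFromConfig(cfg)

		// Omitting VersionId returns the tag set of the current object version;
		// set it (with s3:GetObjectVersionTagging permission) for older versions.
		out, err := client.GetObjectTagging(context.TODO(), &s3.GetObjectTaggingInput{
			Bucket: aws.String("example-bucket"), // hypothetical
			Key:    aws.String("example-key"),    // hypothetical
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, tag := range out.TagSet {
			fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
		}
	}
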
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
new file mode 100644
index 000000000..5959ce734
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go
@@ -0,0 +1,262 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns torrent files from a bucket. BitTorrent can save you bandwidth when
+// you're distributing large files.
+//
+// You can get a torrent only for objects that are less than 5 GB in size, and that
+// are not encrypted using server-side encryption with a customer-provided
+// encryption key.
+//
+// To use GET, you must have READ access to the object.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// The following action is related to GetObjectTorrent :
+//
+// [GetObject]
+//
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) {
+ if params == nil {
+ params = &GetObjectTorrentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetObjectTorrent", params, optFns, c.addOperationGetObjectTorrentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetObjectTorrentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetObjectTorrentInput struct {
+
+ // The name of the bucket containing the object for which to get the torrent files.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The object key for which to get the information.
+ //
+ // This member is required.
+ Key *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type GetObjectTorrentOutput struct {
+
+	// A Bencoded dictionary as defined by the BitTorrent specification.
+ Body io.ReadCloser
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectTorrent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTorrent"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectTorrent(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetObjectTorrentInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetObjectTorrent",
+ }
+}
+
+// getGetObjectTorrentBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getGetObjectTorrentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetObjectTorrentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetObjectTorrentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetObjectTorrentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
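
Unlike the tagging operation above, GetObjectTorrentOutput.Body is a streaming io.ReadCloser (note the middleware stack registers only AddErrorCloseResponseBodyMiddleware, so the body is closed automatically on error paths alone), and the caller must close it. A sketch reusing the client setup from the previous example, with io, log, and os imported; the object names remain hypothetical.

	// Fetch the torrent file for an object and write it to disk.
	out, err := client.GetObjectTorrent(context.TODO(), &s3.GetObjectTorrentInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close() // the caller owns the streaming body

	f, err := os.Create("example-key.torrent")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, out.Body); err != nil {
		log.Fatal(err)
	}
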
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
new file mode 100644
index 000000000..97005c108
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go
@@ -0,0 +1,258 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For
+// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+// object, it checks the PublicAccessBlock configuration for both the bucket (or
+// the bucket that contains the object) and the bucket owner's account. If the
+// PublicAccessBlock settings are different between the bucket and the account,
+// Amazon S3 uses the most restrictive combination of the bucket-level and
+// account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see [The Meaning of "Public"].
+//
+// The following operations are related to GetPublicAccessBlock :
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [PutPublicAccessBlock]
+//
+// [GetPublicAccessBlock]
+//
+// [DeletePublicAccessBlock]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) {
+ if params == nil {
+ params = &GetPublicAccessBlockInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetPublicAccessBlock", params, optFns, c.addOperationGetPublicAccessBlockMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetPublicAccessBlockOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetPublicAccessBlockInput struct {
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want
+ // to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type GetPublicAccessBlockOutput struct {
+
+ // The PublicAccessBlock configuration currently in effect for this Amazon S3
+ // bucket.
+ PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetPublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetPublicAccessBlock"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicAccessBlock(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *GetPublicAccessBlockInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetPublicAccessBlock",
+ }
+}
+
+// getGetPublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getGetPublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*GetPublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addGetPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getGetPublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
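
A sketch of inspecting the effective configuration returned by the operation above, again assuming the client setup from the first example. The four flags on types.PublicAccessBlockConfiguration are *bool, so aws.ToBool guards against nil fields; the bucket name is hypothetical.

	out, err := client.GetPublicAccessBlock(context.TODO(), &s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		// S3 responds with an error (NoSuchPublicAccessBlockConfiguration)
		// when no configuration is set on the bucket.
		log.Fatal(err)
	}
	pab := out.PublicAccessBlockConfiguration
	fmt.Printf("BlockPublicAcls=%t IgnorePublicAcls=%t BlockPublicPolicy=%t RestrictPublicBuckets=%t\n",
		aws.ToBool(pab.BlockPublicAcls),
		aws.ToBool(pab.IgnorePublicAcls),
		aws.ToBool(pab.BlockPublicPolicy),
		aws.ToBool(pab.RestrictPublicBuckets))
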
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
new file mode 100644
index 000000000..21470dc11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go
@@ -0,0 +1,725 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ smithywaiter "github.com/aws/smithy-go/waiter"
+ "time"
+)
+
+// You can use this operation to determine if a bucket exists and if you have
+// permission to access it. The action returns a 200 OK if the bucket exists and
+// you have permission to access it.
+//
+// If the bucket does not exist or you do not have permission to access it, the
+// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found
+// code. A message body is not included, so you cannot determine the exception
+// beyond these HTTP response codes.
+//
+// Authentication and authorization General purpose buckets - Requests to public
+// buckets that grant the s3:ListBucket permission publicly do not need to be
+// signed. All other HeadBucket requests must be authenticated and signed by using
+// IAM credentials (access key ID and secret access key for the IAM identities).
+// All headers with the x-amz- prefix, including x-amz-copy-source , must be
+// signed. For more information, see [REST Authentication].
+//
+// Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the HeadBucket API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
+//
+// The Amazon Web Services CLI or SDKs handle authentication and authorization on
+// your behalf.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use this operation, you must have
+// permissions to perform the s3:ListBucket action. The bucket owner has this
+// permission by default and can grant this permission to others. For more
+//   information about permissions, see [Managing access permissions to your Amazon S3 resources] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have the s3express:CreateSession
+// permission in the Action element of a policy. By default, the session is in
+// the ReadWrite mode. If you want to restrict the access, you can explicitly set
+// the s3express:SessionMode condition key to ReadOnly on the bucket.
+//
+// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3
+// User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// You must make requests for this API operation to the Zonal endpoint. These
+// endpoints support virtual-hosted-style requests in the format
+// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style
+// requests are not supported. For more information about endpoints in Availability
+// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information about endpoints in
+// Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) {
+ if params == nil {
+ params = &HeadBucketInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "HeadBucket", params, optFns, c.addOperationHeadBucketMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*HeadBucketOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type HeadBucketInput struct {
+
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+	// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+	// access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Object Lambda access points - When you use this API operation with an Object
+ // Lambda access point, provide the alias of the Object Lambda access point in
+ // place of the bucket name. If the Object Lambda access point alias in a request
+ // is not valid, the error code InvalidAccessPointAliasError is returned. For more
+ // information about InvalidAccessPointAliasError , see [List of Error Codes].
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+	// S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+ //
+ // This member is required.
+ Bucket *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type HeadBucketOutput struct {
+
+ // Indicates whether the bucket name used in the request is an access point alias.
+ //
+ // For directory buckets, the value of this field is false .
+ AccessPointAlias *bool
+
+	// The name of the location where the bucket is created.
+ //
+ // For directory buckets, the Zone ID of the Availability Zone or the Local Zone
+ // where the bucket is created. An example Zone ID value for an Availability Zone
+ // is usw2-az1 .
+ //
+ // This functionality is only supported by directory buckets.
+ BucketLocationName *string
+
+ // The type of location where the bucket is created.
+ //
+ // This functionality is only supported by directory buckets.
+ BucketLocationType types.LocationType
+
+	// The Region where the bucket is located.
+ BucketRegion *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadBucket{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "HeadBucket"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpHeadBucketValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadBucket(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addHeadBucketUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// BucketExistsWaiterOptions are waiter options for BucketExistsWaiter
+type BucketExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ //
+ // Passing options here is functionally equivalent to passing values to this
+ // config's ClientOptions field that extend the inner client's APIOptions directly.
+ APIOptions []func(*middleware.Stack) error
+
+ // Functional options to be passed to all operations invoked by this client.
+ //
+ // Function values that modify the inner APIOptions are applied after the waiter
+ // config's own APIOptions modifiers.
+ ClientOptions []func(*Options)
+
+ // MinDelay is the minimum amount of time to delay between retries. If unset,
+	// BucketExistsWaiter will use a default minimum delay of 5 seconds. Note that
+	// MinDelay must resolve to a value less than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+ // MaxDelay is the maximum amount of time to delay between retries. If unset or
+	// set to zero, BucketExistsWaiter will use a default max delay of 120 seconds. Note
+	// that MaxDelay must resolve to a value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+	// Retryable is a function that can be used to override the service-defined
+	// waiter behavior based on operation output, or returned error. This function is
+	// used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
+ Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error)
+}
+
+// BucketExistsWaiter defines the waiters for BucketExists
+type BucketExistsWaiter struct {
+ client HeadBucketAPIClient
+
+ options BucketExistsWaiterOptions
+}
+
+// NewBucketExistsWaiter constructs a BucketExistsWaiter.
+func NewBucketExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketExistsWaiterOptions)) *BucketExistsWaiter {
+ options := BucketExistsWaiterOptions{}
+ options.MinDelay = 5 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = bucketExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &BucketExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for BucketExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *BucketExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) error {
+ _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...)
+ return err
+}
+
+// WaitForOutput calls the waiter function for BucketExists waiter and returns the
+// output of the successful operation. The maxWaitDur is the maximum wait duration
+// the waiter will wait. The maxWaitDur is required and must be greater than zero.
+func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketExistsWaiterOptions)) (*HeadBucketOutput, error) {
+ if maxWaitDur <= 0 {
+ return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+ return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.HeadBucket(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
+ for _, opt := range options.ClientOptions {
+ opt(o)
+ }
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return nil, err
+ }
+ if !retryable {
+ return out, nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return nil, fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return nil, fmt.Errorf("exceeded max wait time for BucketExists waiter")
+}
+
+func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) {
+	// A nil error means HeadBucket succeeded: the bucket exists, stop waiting.
+	if err == nil {
+		return false, nil
+	}
+	// A NotFound error is the retryable state: keep polling.
+	var errorType *types.NotFound
+	if errors.As(err, &errorType) {
+		return true, nil
+	}
+	// Any other error is terminal.
+	return false, err
+}
+
+// BucketNotExistsWaiterOptions are waiter options for BucketNotExistsWaiter
+type BucketNotExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ //
+ // Passing options here is functionally equivalent to passing values to this
+ // config's ClientOptions field that extend the inner client's APIOptions directly.
+ APIOptions []func(*middleware.Stack) error
+
+ // Functional options to be passed to all operations invoked by this client.
+ //
+ // Function values that modify the inner APIOptions are applied after the waiter
+ // config's own APIOptions modifiers.
+ ClientOptions []func(*Options)
+
+ // MinDelay is the minimum amount of time to delay between retries. If unset,
+	// BucketNotExistsWaiter will use a default minimum delay of 5 seconds. Note that
+	// MinDelay must resolve to a value less than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+ // MaxDelay is the maximum amount of time to delay between retries. If unset or
+	// set to zero, BucketNotExistsWaiter will use a default max delay of 120 seconds.
+	// Note that MaxDelay must resolve to a value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+	// Retryable is a function that can be used to override the service-defined
+	// waiter behavior based on operation output, or returned error. This function is
+	// used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
+ Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error)
+}
+
+// BucketNotExistsWaiter defines the waiters for BucketNotExists
+type BucketNotExistsWaiter struct {
+ client HeadBucketAPIClient
+
+ options BucketNotExistsWaiterOptions
+}
+
+// NewBucketNotExistsWaiter constructs a BucketNotExistsWaiter.
+func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*BucketNotExistsWaiterOptions)) *BucketNotExistsWaiter {
+ options := BucketNotExistsWaiterOptions{}
+ options.MinDelay = 5 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = bucketNotExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &BucketNotExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is
+// the maximum wait duration the waiter will wait. The maxWaitDur is required and
+// must be greater than zero.
+func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error {
+ _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...)
+ return err
+}
+
+// WaitForOutput calls the waiter function for BucketNotExists waiter and returns
+// the output of the successful operation. The maxWaitDur is the maximum wait
+// duration the waiter will wait. The maxWaitDur is required and must be greater
+// than zero.
+func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) (*HeadBucketOutput, error) {
+ if maxWaitDur <= 0 {
+ return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+ return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.HeadBucket(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
+ for _, opt := range options.ClientOptions {
+ opt(o)
+ }
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return nil, err
+ }
+ if !retryable {
+ return out, nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return nil, fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return nil, fmt.Errorf("exceeded max wait time for BucketNotExists waiter")
+}
+
+func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, output *HeadBucketOutput, err error) (bool, error) {
+	if err != nil {
+		// A NotFound error is the success state: the bucket no longer exists.
+		var errorType *types.NotFound
+		if errors.As(err, &errorType) {
+			return false, nil
+		}
+		// Any other error is terminal.
+		return false, err
+	}
+	// The bucket still responds: keep polling.
+	return true, nil
+}
+
+func (v *HeadBucketInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+// HeadBucketAPIClient is a client that implements the HeadBucket operation.
+type HeadBucketAPIClient interface {
+ HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error)
+}
+
+var _ HeadBucketAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "HeadBucket",
+ }
+}
+
+// getHeadBucketBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getHeadBucketBucketMember(input interface{}) (*string, bool) {
+ in := input.(*HeadBucketInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addHeadBucketUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getHeadBucketBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignHeadBucket is used to generate a presigned HTTP request which contains
+// the presigned URL, signed headers, and HTTP method used.
+func (c *PresignClient) PresignHeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &HeadBucketInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "HeadBucket", params, clientOptFns,
+ c.client.addOperationHeadBucketMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ addHeadBucketPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addHeadBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
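
The waiter and presign plumbing above composes as follows; a sketch assuming the same client setup as the earlier examples, a hypothetical bucket name, and time imported.

	// Poll HeadBucket until the bucket is reachable, backing off between
	// attempts via smithywaiter.ComputeDelay, for at most two minutes.
	waiter := s3.NewBucketExistsWaiter(client)
	if err := waiter.Wait(context.TODO(), &s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	}, 2*time.Minute); err != nil {
		log.Fatal(err)
	}

	// Presign a HeadBucket request: the resulting URL and headers can be
	// replayed by any HTTP client until they expire, without credentials.
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignHeadBucket(context.TODO(), &s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Method, req.URL)
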
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
new file mode 100644
index 000000000..ce7a68477
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go
@@ -0,0 +1,1204 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ smithywaiter "github.com/aws/smithy-go/waiter"
+ "time"
+)
+
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're interested only in an object's
+// metadata.
+//
+// A HEAD request has the same options as a GET operation on an object. The
+// response is identical to the GET response except that there is no response
+// body. Because of this, if the HEAD request generates an error, it returns a
+// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405
+// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not
+// possible to retrieve the exact exception of these error codes.
+//
+// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers].
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use HEAD , you must have the
+// s3:GetObject permission. You need the relevant read object (or version)
+//   permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3] in the Amazon S3
+// User Guide. For more information about the permissions to S3 API operations by
+//   S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide.
+//
+// If the object you request doesn't exist, the error that Amazon S3 returns
+//
+// depends on whether you also have the s3:ListBucket permission.
+//
+// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an
+// HTTP status code 404 Not Found error.
+//
+// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
+// status code 403 Forbidden error.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+//   directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+//   expires. For more information about authorization, see [CreateSession].
+//
+// If you enable x-amz-checksum-mode in the request and the object is encrypted
+//
+// with Amazon Web Services Key Management Service (Amazon Web Services KMS), you
+// must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM
+// identity-based policies and KMS key policies for the KMS key to retrieve the
+// checksum of the object.
+//
+// Encryption Encryption request headers, like x-amz-server-side-encryption ,
+// should not be sent for HEAD requests if your object uses server-side encryption
+// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side
+// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side
+// encryption with Amazon S3 managed encryption keys (SSE-S3). The
+// x-amz-server-side-encryption header is used when you PUT an object to S3 and
+// want to specify the encryption method. If you include this header in a HEAD
+// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad
+// Request error. It's because the encryption method can't be changed when you
+// retrieve the object.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when you
+// retrieve the metadata from the object, you must use the following headers to
+// provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
+//
+// - x-amz-server-side-encryption-customer-algorithm
+//
+// - x-amz-server-side-encryption-customer-key
+//
+// - x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide.
+//
+// Directory bucket - For directory buckets, there are only two supported options
+// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more
+// information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide.
+//
+// Versioning
+//
+// - If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in the
+// response.
+//
+// - If the specified version is a delete marker, the response returns a 405
+// Method Not Allowed error and the Last-Modified: timestamp response header.
+//
+// - Directory buckets - Delete marker is not supported for directory buckets.
+//
+// - Directory buckets - S3 Versioning isn't enabled or supported for directory
+// buckets. For this API operation, only the null value of the version ID is
+// supported by directory buckets. You can only specify null as the versionId
+// query parameter in the request.
+//
+// HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// For directory buckets, you must make requests for this API operation to the
+// Zonal endpoint. These endpoints support virtual-hosted-style requests in the
+// format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// The following actions are related to HeadObject :
+//
+// [GetObject]
+//
+// [GetObjectAttributes]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) {
+ if params == nil {
+ params = &HeadObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "HeadObject", params, optFns, c.addOperationHeadObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*HeadObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
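+
+// What follows is a brief, hedged usage sketch rather than generated API
+// surface: it assumes a constructed *Client named client, placeholder
+// bucket and key names, and the usual aws, errors, fmt, and types imports.
+// It shows a HeadObject call with checksum mode enabled and NotFound handling.
+//
+//	out, err := client.HeadObject(ctx, &HeadObjectInput{
+//		Bucket:       aws.String("amzn-s3-demo-bucket"),
+//		Key:          aws.String("my-object-key"),
+//		ChecksumMode: types.ChecksumModeEnabled,
+//	})
+//	if err != nil {
+//		var nf *types.NotFound
+//		if errors.As(err, &nf) {
+//			// A 404 is only returned when the caller has s3:ListBucket;
+//			// otherwise S3 answers 403.
+//			return fmt.Errorf("object does not exist: %w", err)
+//		}
+//		return err
+//	}
+//	fmt.Println("size:", aws.ToInt64(out.ContentLength), "etag:", aws.ToString(out.ETag))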
+
+type HeadObjectInput struct {
+
+ // The name of the bucket that contains the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+	// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+	// access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+	// S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The object key.
+ //
+ // This member is required.
+ Key *string
+
+ // To retrieve the checksum, this parameter must be enabled.
+ //
+ // General purpose buckets - If you enable checksum mode and the object is
+	// uploaded with a [checksum] and encrypted with a Key Management Service (KMS) key, you
+ // must have permission to use the kms:Decrypt action to retrieve the checksum.
+ //
+ // Directory buckets - If you enable ChecksumMode and the object is encrypted with
+ // Amazon Web Services Key Management Service (Amazon Web Services KMS), you must
+ // also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM
+ // identity-based policies and KMS key policies for the KMS key to retrieve the
+ // checksum of the object.
+ //
+ // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html
+ ChecksumMode types.ChecksumMode
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Return the object only if its entity tag (ETag) is the same as the one
+ // specified; otherwise, return a 412 (precondition failed) error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
+ // - If-Match condition evaluates to true , and;
+ //
+ // - If-Unmodified-Since condition evaluates to false ;
+ //
+ // Then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+ // Return the object only if it has been modified since the specified time;
+ // otherwise, return a 304 (not modified) error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in the
+ // request as follows:
+ //
+ // - If-None-Match condition evaluates to false , and;
+ //
+ // - If-Modified-Since condition evaluates to true ;
+ //
+ // Then Amazon S3 returns the 304 Not Modified response code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfModifiedSince *time.Time
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified; otherwise, return a 304 (not modified) error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in the
+ // request as follows:
+ //
+ // - If-None-Match condition evaluates to false , and;
+ //
+ // - If-Modified-Since condition evaluates to true ;
+ //
+ // Then Amazon S3 returns the 304 Not Modified response code.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfNoneMatch *string
+
+ // Return the object only if it has not been modified since the specified time;
+ // otherwise, return a 412 (precondition failed) error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
+ // - If-Match condition evaluates to true , and;
+ //
+ // - If-Unmodified-Since condition evaluates to false ;
+ //
+ // Then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfUnmodifiedSince *time.Time
+
+ // Part number of the object being read. This is a positive integer between 1 and
+ // 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+	// Useful for querying the size of the part and the number of parts in this
+	// object.
+ PartNumber *int32
+
+ // HeadObject returns only the metadata for an object. If the Range is
+ // satisfiable, only the ContentLength is affected in the response. If the Range
+ // is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error.
+ Range *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
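+
+// A hedged sketch of a conditional HEAD using the If* fields above (names are
+// assumed; cachedETag is a previously stored ETag). A 304 Not Modified is
+// typically surfaced as an operation error, so it is detected by inspecting
+// the wrapped HTTP response via the awshttp package
+// ("github.com/aws/aws-sdk-go-v2/aws/transport/http").
+//
+//	_, err := client.HeadObject(ctx, &HeadObjectInput{
+//		Bucket:      aws.String("amzn-s3-demo-bucket"),
+//		Key:         aws.String("my-object-key"),
+//		IfNoneMatch: aws.String(cachedETag),
+//	})
+//	var respErr *awshttp.ResponseError
+//	if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
+//		// The cached copy is still current; skip re-fetching metadata.
+//	}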
+
+func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type HeadObjectOutput struct {
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // The archive state of the head object.
+ //
+ // This functionality is not supported for directory buckets.
+ ArchiveStatus types.ArchiveStatus
+
+ // Indicates whether the object uses an S3 Bucket Key for server-side encryption
+ // with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+	// The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is
+	// only present if the checksum was uploaded with the object. When you use an API
+	// operation on an object that was uploaded using multipart uploads, this value may
+	// not be a direct checksum value of the full object. Instead, it's a calculation
+	// based on the checksum values of each individual part. For more information about
+	// how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+	// Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+	// how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+	// Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more
+ // information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+	// The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+	// present if the checksum was uploaded with the object. When you use an API
+	// operation on an object that was uploaded using multipart uploads, this value may
+	// not be a direct checksum value of the full object. Instead, it's a calculation
+	// based on the checksum values of each individual part. For more information about
+	// how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+	// Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA1 *string
+
+	// The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+	// present if the checksum was uploaded with the object. When you use an API
+	// operation on an object that was uploaded using multipart uploads, this value may
+	// not be a direct checksum value of the full object. Instead, it's a calculation
+	// based on the checksum values of each individual part. For more information about
+	// how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+	// Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA256 *string
+
+ // The checksum type, which determines how part-level checksums are combined to
+ // create an object-level checksum for multipart objects. You can use this header
+ // response to verify that the checksum type that is received is the same checksum
+	// type that was specified in the CreateMultipartUpload request. For more information,
+ // see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Indicates what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // Size of the body in bytes.
+ ContentLength *int64
+
+ // The portion of the object returned in the response for a GET request.
+ ContentRange *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ //
+ // This functionality is not supported for directory buckets.
+ DeleteMarker *bool
+
+ // An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ // specific version of a resource found at a URL.
+ ETag *string
+
+	// If the object expiration is configured (see [PutBucketLifecycleConfiguration]),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // Object expiration information is not returned in directory buckets and this
+ // header returns the value " NotImplemented " in all responses for directory
+ // buckets.
+ //
+ // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+ Expiration *string
+
+ // The date and time at which the object is no longer cacheable.
+ //
+ // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using
+ // the ExpiresString field which contains the unparsed value from the service
+ // response.
+ Expires *time.Time
+
+ // The unparsed value of the Expires field from the service response. Prefer use
+ // of this value over the normal Expires response field where possible.
+ ExpiresString *string
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ //
+ // Map keys will be normalized to lower-case.
+ Metadata map[string]string
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP, you
+ // can create metadata whose values are not legal HTTP headers.
+ //
+ // This functionality is not supported for directory buckets.
+ MissingMeta *int32
+
+ // Specifies whether a legal hold is in effect for this object. This header is
+ // only returned if the requester has the s3:GetObjectLegalHold permission. This
+ // header is not returned if the specified version of this object has never had a
+ // legal hold applied. For more information about S3 Object Lock, see [Object Lock].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode, if any, that's in effect for this object. This header is
+ // only returned if the requester has the s3:GetObjectRetention permission. For
+ // more information about S3 Object Lock, see [Object Lock].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when the Object Lock retention period expires. This header is
+ // only returned if the requester has the s3:GetObjectRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has. This value is only returned if you specify
+ // partNumber in your request and the object was uploaded as a multipart upload.
+ PartsCount *int32
+
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or a destination in a replication rule.
+ //
+	// In replication, you have a source bucket on which you configure replication,
+	// and a destination bucket or buckets where Amazon S3 stores object replicas. When you
+ // request an object ( GetObject ) or object metadata ( HeadObject ) from these
+ // buckets, Amazon S3 will return the x-amz-replication-status header in the
+ // response as follows:
+ //
+ // - If requesting an object from the source bucket, Amazon S3 will return the
+ // x-amz-replication-status header if the object in your request is eligible for
+ // replication.
+ //
+	// For example, suppose that in your replication configuration, you specify the
+	// object prefix TaxDocs , requesting that Amazon S3 replicate objects with the
+	// key prefix TaxDocs . Any objects you upload with this key name prefix, for
+	// example TaxDocs/document1.pdf , are eligible for replication. For any object
+	// request with this key name prefix, Amazon S3 will return the
+	// x-amz-replication-status header with a value of PENDING, COMPLETED, or
+	// FAILED, indicating the object replication status.
+ //
+ // - If requesting an object from a destination bucket, Amazon S3 will return
+ // the x-amz-replication-status header with value REPLICA if the object in your
+ // request is a replica that Amazon S3 created and there is no replica modification
+ // replication in progress.
+ //
+ // - When replicating objects to multiple destination buckets, the
+ // x-amz-replication-status header acts differently. The header of the source
+ // object will only return a value of COMPLETED when replication is successful to
+ // all destinations. The header will remain at value PENDING until replication has
+	// completed for all destinations. If replication fails for one or more
+	// destinations, the header will return FAILED.
+ //
+ // For more information, see [Replication].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+	// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+	// progress (see [RestoreObject]) or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value
+ // ongoing-request="true" .
+ //
+ // For more information about archiving objects, see [Transitioning Objects: General Considerations].
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
+ //
+ // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations
+ // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+ Restore *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see [Storage Classes].
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ StorageClass types.StorageClass
+
+ // Version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
+ WebsiteRedirectLocation *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpHeadObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "HeadObject"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpHeadObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opHeadObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addHeadObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter
+type ObjectExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ //
+ // Passing options here is functionally equivalent to passing values to this
+ // config's ClientOptions field that extend the inner client's APIOptions directly.
+ APIOptions []func(*middleware.Stack) error
+
+ // Functional options to be passed to all operations invoked by this client.
+ //
+ // Function values that modify the inner APIOptions are applied after the waiter
+ // config's own APIOptions modifiers.
+ ClientOptions []func(*Options)
+
+	// MinDelay is the minimum amount of time to delay between retries. If unset,
+	// ObjectExistsWaiter will use a default minimum delay of 5 seconds. Note that
+	// MinDelay must resolve to a value less than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+	// MaxDelay is the maximum amount of time to delay between retries. If unset or
+	// set to zero, ObjectExistsWaiter will use a default max delay of 120 seconds. Note
+	// that MaxDelay must resolve to a value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+	// Retryable is a function that can be used to override the service-defined
+	// waiter behavior based on operation output or the returned error. This function
+	// is used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default, service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators. The function returns an error in case of a failure state.
+	// In case of a retry state, this function returns a bool value of true and a nil
+	// error, while in case of success it returns a bool value of false and a nil error.
+ Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
+}
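+
+// A hedged sketch of overriding these options (names assumed): tighter delays,
+// attempt logging, and a Retryable that mirrors the service-modeled semantics
+// described above: true/nil to retry, false/nil on success, and false/err on a
+// terminal failure.
+//
+//	waiter := NewObjectExistsWaiter(client, func(o *ObjectExistsWaiterOptions) {
+//		o.MinDelay = 2 * time.Second
+//		o.MaxDelay = 30 * time.Second
+//		o.LogWaitAttempts = true
+//		o.Retryable = func(ctx context.Context, in *HeadObjectInput, out *HeadObjectOutput, err error) (bool, error) {
+//			if err == nil {
+//				return false, nil // object found: stop waiting
+//			}
+//			var nf *types.NotFound
+//			if errors.As(err, &nf) {
+//				return true, nil // not there yet: retry
+//			}
+//			return false, err // any other error is terminal
+//		}
+//	})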
+
+// ObjectExistsWaiter defines the waiters for ObjectExists
+type ObjectExistsWaiter struct {
+ client HeadObjectAPIClient
+
+ options ObjectExistsWaiterOptions
+}
+
+// NewObjectExistsWaiter constructs an ObjectExistsWaiter.
+func NewObjectExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectExistsWaiterOptions)) *ObjectExistsWaiter {
+ options := ObjectExistsWaiterOptions{}
+ options.MinDelay = 5 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = objectExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &ObjectExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for ObjectExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *ObjectExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) error {
+ _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...)
+ return err
+}
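+
+// A brief usage sketch (placeholder names): block for up to two minutes until
+// the object exists, then proceed.
+//
+//	waiter := NewObjectExistsWaiter(client)
+//	if err := waiter.Wait(ctx, &HeadObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("my-object-key"),
+//	}, 2*time.Minute); err != nil {
+//		return fmt.Errorf("object never appeared: %w", err)
+//	}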
+
+// WaitForOutput calls the waiter function for ObjectExists waiter and returns the
+// output of the successful operation. The maxWaitDur is the maximum wait duration
+// the waiter will wait. The maxWaitDur is required and must be greater than zero.
+func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectExistsWaiterOptions)) (*HeadObjectOutput, error) {
+ if maxWaitDur <= 0 {
+ return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+		return nil, fmt.Errorf("minimum waiter delay %v must be less than or equal to maximum waiter delay of %v", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.HeadObject(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
+ for _, opt := range options.ClientOptions {
+ opt(o)
+ }
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return nil, err
+ }
+ if !retryable {
+ return out, nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return nil, fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return nil, fmt.Errorf("exceeded max wait time for ObjectExists waiter")
+}
+
+func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) {
+	// A nil error means the object exists: terminal success.
+	if err == nil {
+		return false, nil
+	}
+
+	// A NotFound error means the object is not there yet: retry.
+	var errorType *types.NotFound
+	if errors.As(err, &errorType) {
+		return true, nil
+	}
+
+	// Any other error is terminal.
+	return false, err
+}
+
+// ObjectNotExistsWaiterOptions are waiter options for ObjectNotExistsWaiter
+type ObjectNotExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ //
+ // Passing options here is functionally equivalent to passing values to this
+ // config's ClientOptions field that extend the inner client's APIOptions directly.
+ APIOptions []func(*middleware.Stack) error
+
+ // Functional options to be passed to all operations invoked by this client.
+ //
+ // Function values that modify the inner APIOptions are applied after the waiter
+ // config's own APIOptions modifiers.
+ ClientOptions []func(*Options)
+
+	// MinDelay is the minimum amount of time to delay between retries. If unset,
+	// ObjectNotExistsWaiter will use a default minimum delay of 5 seconds. Note that
+	// MinDelay must resolve to a value less than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+	// MaxDelay is the maximum amount of time to delay between retries. If unset or
+	// set to zero, ObjectNotExistsWaiter will use a default max delay of 120 seconds.
+	// Note that MaxDelay must resolve to a value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+	// Retryable is a function that can be used to override the service-defined
+	// waiter behavior based on operation output or the returned error. This function
+	// is used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default, service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators. The function returns an error in case of a failure state.
+	// In case of a retry state, this function returns a bool value of true and a nil
+	// error, while in case of success it returns a bool value of false and a nil error.
+ Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
+}
+
+// ObjectNotExistsWaiter defines the waiters for ObjectNotExists
+type ObjectNotExistsWaiter struct {
+ client HeadObjectAPIClient
+
+ options ObjectNotExistsWaiterOptions
+}
+
+// NewObjectNotExistsWaiter constructs an ObjectNotExistsWaiter.
+func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*ObjectNotExistsWaiterOptions)) *ObjectNotExistsWaiter {
+ options := ObjectNotExistsWaiterOptions{}
+ options.MinDelay = 5 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = objectNotExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &ObjectNotExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is
+// the maximum wait duration the waiter will wait. The maxWaitDur is required and
+// must be greater than zero.
+func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error {
+ _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...)
+ return err
+}
+
+// WaitForOutput calls the waiter function for ObjectNotExists waiter and returns
+// the output of the successful operation. The maxWaitDur is the maximum wait
+// duration the waiter will wait. The maxWaitDur is required and must be greater
+// than zero.
+func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) (*HeadObjectOutput, error) {
+ if maxWaitDur <= 0 {
+ return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+		return nil, fmt.Errorf("minimum waiter delay %v must be less than or equal to maximum waiter delay of %v", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.HeadObject(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
+ for _, opt := range options.ClientOptions {
+ opt(o)
+ }
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return nil, err
+ }
+ if !retryable {
+ return out, nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return nil, fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return nil, fmt.Errorf("exceeded max wait time for ObjectNotExists waiter")
+}
+
+func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) {
+	if err != nil {
+		// A NotFound error means the object is gone: terminal success.
+		var errorType *types.NotFound
+		if errors.As(err, &errorType) {
+			return false, nil
+		}
+		// Any other error is terminal.
+		return false, err
+	}
+
+	// A nil error means the object still exists: retry.
+	return true, nil
+}
+
+func (v *HeadObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+// HeadObjectAPIClient is a client that implements the HeadObject operation.
+type HeadObjectAPIClient interface {
+ HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error)
+}
+
+var _ HeadObjectAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "HeadObject",
+ }
+}
+
+// getHeadObjectBucketMember returns a pointer to a string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getHeadObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*HeadObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addHeadObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getHeadObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignHeadObject is used to generate a presigned HTTP request, which contains
+// the presigned URL, signed headers, and the HTTP method used.
+func (c *PresignClient) PresignHeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &HeadObjectInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "HeadObject", params, clientOptFns,
+ c.client.addOperationHeadObjectMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ addHeadObjectPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
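+
+// A minimal presign sketch (assumed names): mint a time-limited HEAD URL that
+// can be handed to a caller that holds no AWS credentials.
+//
+//	presigner := NewPresignClient(client, func(o *PresignOptions) {
+//		o.Expires = 15 * time.Minute
+//	})
+//	req, err := presigner.PresignHeadObject(ctx, &HeadObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("my-object-key"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(req.Method, req.URL) // e.g. HEAD https://amzn-s3-demo-bucket.s3...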
+
+func addHeadObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
new file mode 100644
index 000000000..2580b44aa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go
@@ -0,0 +1,276 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Lists the analytics configurations for the bucket. You can have up to 1,000
+// analytics configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100
+// configurations at a time. You should always check the IsTruncated element in
+// the response. If there are no more configurations to list, IsTruncated is set
+// to false. If there are more configurations to list, IsTruncated is set to true,
+// and there will be a value in NextContinuationToken . You use the
+// NextContinuationToken value to continue the pagination of the list by passing
+// the value in continuation-token in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis].
+//
+// The following operations are related to ListBucketAnalyticsConfigurations :
+//
+// [GetBucketAnalyticsConfiguration]
+//
+// [DeleteBucketAnalyticsConfiguration]
+//
+// [PutBucketAnalyticsConfiguration]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketAnalyticsConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketAnalyticsConfigurations", params, optFns, c.addOperationListBucketAnalyticsConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketAnalyticsConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
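+
+// A hedged pagination sketch (placeholder bucket name): no paginator helper is
+// generated for this operation, so NextContinuationToken is threaded through
+// manually until IsTruncated reports that the list is complete.
+//
+//	var token *string
+//	for {
+//		out, err := client.ListBucketAnalyticsConfigurations(ctx, &ListBucketAnalyticsConfigurationsInput{
+//			Bucket:            aws.String("amzn-s3-demo-bucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		for _, cfg := range out.AnalyticsConfigurationList {
+//			fmt.Println(aws.ToString(cfg.Id))
+//		}
+//		if !aws.ToBool(out.IsTruncated) {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}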
+
+type ListBucketAnalyticsConfigurationsInput struct {
+
+ // The name of the bucket from which analytics configurations are retrieved.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type ListBucketAnalyticsConfigurationsOutput struct {
+
+ // The list of analytics configurations for a bucket.
+ AnalyticsConfigurationList []types.AnalyticsConfiguration
+
+ // The marker that is used as a starting point for this analytics configuration
+ // list response. This value is present if it was sent in the request.
+ ContinuationToken *string
+
+ // Indicates whether the returned list of analytics configurations is complete. A
+ // value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated *bool
+
+	// NextContinuationToken is sent when IsTruncated is true, which indicates that
+ // there are more analytics configurations to list. The next request must include
+ // this NextContinuationToken . The token is obfuscated and is not a usable value.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketAnalyticsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketAnalyticsConfigurations"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListBucketAnalyticsConfigurationsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListBucketAnalyticsConfigurations",
+ }
+}
+
+// getListBucketAnalyticsConfigurationsBucketMember returns a pointer to a string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getListBucketAnalyticsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketAnalyticsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketAnalyticsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketAnalyticsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
new file mode 100644
index 000000000..2a77b6804
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go
@@ -0,0 +1,270 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Lists the S3 Intelligent-Tiering configurations from the specified bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
+// without performance impact or operational overhead. S3 Intelligent-Tiering
+// delivers automatic cost savings in three low latency and high throughput access
+// tiers. To get the lowest storage cost on data that can be accessed in minutes to
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to ListBucketIntelligentTieringConfigurations include:
+//
+// [DeleteBucketIntelligentTieringConfiguration]
+//
+// [PutBucketIntelligentTieringConfiguration]
+//
+// [GetBucketIntelligentTieringConfiguration]
+//
+// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
+func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketIntelligentTieringConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketIntelligentTieringConfigurations", params, optFns, c.addOperationListBucketIntelligentTieringConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketIntelligentTieringConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketIntelligentTieringConfigurationsInput struct {
+
+ // The name of the Amazon S3 bucket whose configuration you want to modify or
+ // retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type ListBucketIntelligentTieringConfigurationsOutput struct {
+
+ // The ContinuationToken that represents a placeholder from where this request
+ // should begin.
+ ContinuationToken *string
+
+ // The list of S3 Intelligent-Tiering configurations for a bucket.
+ IntelligentTieringConfigurationList []types.IntelligentTieringConfiguration
+
+ // Indicates whether the returned list of S3 Intelligent-Tiering configurations
+ // is complete. A value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated *bool
+
+ // The marker used to continue this S3 Intelligent-Tiering configuration
+ // listing. Use the NextContinuationToken from this response to continue the
+ // listing in a subsequent request. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketIntelligentTieringConfigurations"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListBucketIntelligentTieringConfigurationsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListBucketIntelligentTieringConfigurations",
+ }
+}
+
+// getListBucketIntelligentTieringConfigurationsBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating
+// whether the input has a modeled bucket name.
+func getListBucketIntelligentTieringConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketIntelligentTieringConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketIntelligentTieringConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
new file mode 100644
index 000000000..0bd202161
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go
@@ -0,0 +1,278 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns a list of inventory configurations for the bucket. You can have up to
+// 1,000 inventory configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false. If
+// there are more configurations to list, IsTruncated is set to true, and there is
+// a value in NextContinuationToken. You use the NextContinuationToken value to
+// continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetInventoryConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory].
+//
+// The following operations are related to ListBucketInventoryConfigurations:
+//
+// [GetBucketInventoryConfiguration]
+//
+// [DeleteBucketInventoryConfiguration]
+//
+// [PutBucketInventoryConfiguration]
+//
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html
+// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
+func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketInventoryConfigurations", params, optFns, c.addOperationListBucketInventoryConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketInventoryConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
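+
+// Example (illustrative sketch, not part of the generated API): draining the
+// listing via the continuation-token flow described above. The helper name and
+// error handling are assumptions; it presumes a configured *s3.Client and
+// imports of context, the s3 package, and s3/types.
+//
+//	func listAllInventoryConfigs(ctx context.Context, client *s3.Client, bucket string) ([]types.InventoryConfiguration, error) {
+//		var all []types.InventoryConfiguration
+//		in := &s3.ListBucketInventoryConfigurationsInput{Bucket: &bucket}
+//		for {
+//			out, err := client.ListBucketInventoryConfigurations(ctx, in)
+//			if err != nil {
+//				return nil, err
+//			}
+//			all = append(all, out.InventoryConfigurationList...)
+//			// IsTruncated false (or nil) means the listing is complete.
+//			if out.IsTruncated == nil || !*out.IsTruncated {
+//				return all, nil
+//			}
+//			// Feed NextContinuationToken back as ContinuationToken for the next page.
+//			in.ContinuationToken = out.NextContinuationToken
+//		}
+//	}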
+
+type ListBucketInventoryConfigurationsInput struct {
+
+ // The name of the bucket containing the inventory configurations to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The marker used to continue an inventory configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ ContinuationToken *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type ListBucketInventoryConfigurationsOutput struct {
+
+ // If sent in the request, the marker that is used as a starting point for this
+ // inventory configuration list response.
+ ContinuationToken *string
+
+ // The list of inventory configurations for a bucket.
+ InventoryConfigurationList []types.InventoryConfiguration
+
+ // Tells whether the returned list of inventory configurations is complete. A
+ // value of true indicates that the list is not complete and the
+ // NextContinuationToken is provided for a subsequent request.
+ IsTruncated *bool
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketInventoryConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketInventoryConfigurations"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketInventoryConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListBucketInventoryConfigurationsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListBucketInventoryConfigurations",
+ }
+}
+
+// getListBucketInventoryConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating whether the
+// input has a modeled bucket name.
+func getListBucketInventoryConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketInventoryConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketInventoryConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketInventoryConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
new file mode 100644
index 000000000..7f99075af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go
@@ -0,0 +1,280 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Lists the metrics configurations for the bucket. The metrics configurations are
+// only for the request metrics of the bucket and do not provide information on
+// daily storage metrics. You can have up to 1,000 configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false. If
+// there are more configurations to list, IsTruncated is set to true, and there is
+// a value in NextContinuationToken. You use the NextContinuationToken value to
+// continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the
+// s3:GetMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For more information about metrics configurations and CloudWatch request
+// metrics, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to ListBucketMetricsConfigurations:
+//
+// [PutBucketMetricsConfiguration]
+//
+// [GetBucketMetricsConfiguration]
+//
+// [DeleteBucketMetricsConfiguration]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) {
+ if params == nil {
+ params = &ListBucketMetricsConfigurationsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBucketMetricsConfigurations", params, optFns, c.addOperationListBucketMetricsConfigurationsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketMetricsConfigurationsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
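+
+// Example (illustrative sketch, not part of the generated API): fetching a
+// single page of metrics configurations with an ownership check, assuming ctx
+// and a configured client are in scope and the aws helper package is imported;
+// the bucket and account values are placeholders. When IsTruncated is true,
+// feed NextContinuationToken back as ContinuationToken, as in the inventory
+// sketch above.
+//
+//	out, err := client.ListBucketMetricsConfigurations(ctx, &s3.ListBucketMetricsConfigurationsInput{
+//		Bucket:              aws.String("amzn-s3-demo-bucket"),
+//		ExpectedBucketOwner: aws.String("111122223333"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, cfg := range out.MetricsConfigurationList {
+//		fmt.Println(aws.ToString(cfg.Id)) // each configuration is keyed by its Id
+//	}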
+
+type ListBucketMetricsConfigurationsInput struct {
+
+ // The name of the bucket containing the metrics configurations to retrieve.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The marker that is used to continue a metrics configuration listing that has
+ // been truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ ContinuationToken *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type ListBucketMetricsConfigurationsOutput struct {
+
+ // The marker that is used as a starting point for this metrics configuration list
+ // response. This value is present if it was sent in the request.
+ ContinuationToken *string
+
+ // Indicates whether the returned list of metrics configurations is complete. A
+ // value of true indicates that the list is not complete and the
+ // NextContinuationToken will be provided for a subsequent request.
+ IsTruncated *bool
+
+ // The list of metrics configurations for a bucket.
+ MetricsConfigurationList []types.MetricsConfiguration
+
+ // The marker used to continue a metrics configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value that
+ // Amazon S3 understands.
+ NextContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBucketMetricsConfigurations{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketMetricsConfigurations"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBucketMetricsConfigurations(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListBucketMetricsConfigurationsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListBucketMetricsConfigurations",
+ }
+}
+
+// getListBucketMetricsConfigurationsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating whether the
+// input has a modeled bucket name.
+func getListBucketMetricsConfigurationsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListBucketMetricsConfigurationsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListBucketMetricsConfigurationsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListBucketMetricsConfigurationsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
new file mode 100644
index 000000000..aec9715d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go
@@ -0,0 +1,349 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+// To grant IAM permission to use this operation, you must add the
+// s3:ListAllMyBuckets policy action.
+//
+// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets].
+//
+// We strongly recommend using only paginated ListBuckets requests. Unpaginated
+// ListBuckets requests are only supported for Amazon Web Services accounts set to
+// the default general purpose bucket quota of 10,000. If you have an approved
+// general purpose bucket quota above 10,000, you must send paginated ListBuckets
+// requests to list your account’s buckets. All unpaginated ListBuckets requests
+// will be rejected for Amazon Web Services accounts with a general purpose bucket
+// quota greater than 10,000.
+//
+// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html
+func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) {
+ if params == nil {
+ params = &ListBucketsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBuckets", params, optFns, c.addOperationListBucketsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBucketsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBucketsInput struct {
+
+ // Limits the response to buckets that are located in the specified Amazon Web
+ // Services Region. The Amazon Web Services Region must be expressed according to
+ // the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon)
+ // Region. For a list of the valid values for all of the Amazon Web Services
+ // Regions, see [Regions and Endpoints].
+ //
+ // Requests made to a Regional endpoint that is different from the bucket-region
+ // parameter are not supported. For example, if you want to limit the response to
+ // your buckets in Region us-west-2, the request must be made to an endpoint in
+ // Region us-west-2.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ BucketRegion *string
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued on
+ // this account with a token. ContinuationToken is obfuscated and is not a real
+ // key. You can use this ContinuationToken for pagination of the list results.
+ //
+ // Length Constraints: Minimum length of 0. Maximum length of 1024.
+ //
+ // Required: No.
+ //
+ // If you specify the bucket-region, prefix, or continuation-token query
+ // parameters without using max-buckets to set the maximum number of buckets
+ // returned in the response, Amazon S3 applies a default page size of 10,000 and
+ // provides a continuation token if there are more buckets.
+ ContinuationToken *string
+
+ // Maximum number of buckets to be returned in the response. When this number
+ // exceeds the count of buckets owned by the Amazon Web Services account, all
+ // the buckets are returned in the response.
+ MaxBuckets *int32
+
+ // Limits the response to bucket names that begin with the specified bucket name
+ // prefix.
+ Prefix *string
+
+ noSmithyDocumentSerde
+}
+
+type ListBucketsOutput struct {
+
+ // The list of buckets owned by the requester.
+ Buckets []types.Bucket
+
+ // ContinuationToken is included in the response when there are more buckets that
+ // can be listed with pagination. The next ListBuckets request to Amazon S3 can be
+ // continued with this ContinuationToken. ContinuationToken is obfuscated and is
+ // not a real bucket name.
+ ContinuationToken *string
+
+ // The owner of the buckets listed.
+ Owner *types.Owner
+
+ // If Prefix was sent with the request, it is included in the response.
+ //
+ // All bucket names in the response begin with the specified bucket name prefix.
+ Prefix *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListBuckets{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListBuckets"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListBucketsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListBucketsPaginatorOptions is the paginator options for ListBuckets
+type ListBucketsPaginatorOptions struct {
+ // Maximum number of buckets to be returned in the response. When this number
+ // exceeds the count of buckets owned by the Amazon Web Services account, all
+ // the buckets are returned in the response.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListBucketsPaginator is a paginator for ListBuckets
+type ListBucketsPaginator struct {
+ options ListBucketsPaginatorOptions
+ client ListBucketsAPIClient
+ params *ListBucketsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListBucketsPaginator returns a new ListBucketsPaginator
+func NewListBucketsPaginator(client ListBucketsAPIClient, params *ListBucketsInput, optFns ...func(*ListBucketsPaginatorOptions)) *ListBucketsPaginator {
+ if params == nil {
+ params = &ListBucketsInput{}
+ }
+
+ options := ListBucketsPaginatorOptions{}
+ if params.MaxBuckets != nil {
+ options.Limit = *params.MaxBuckets
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListBucketsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ContinuationToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListBucketsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListBuckets page.
+func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ContinuationToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxBuckets = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListBuckets(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.ContinuationToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListBucketsAPIClient is a client that implements the ListBuckets operation.
+type ListBucketsAPIClient interface {
+ ListBuckets(context.Context, *ListBucketsInput, ...func(*Options)) (*ListBucketsOutput, error)
+}
+
+var _ ListBucketsAPIClient = (*Client)(nil)
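+
+// Example (illustrative sketch): consuming ListBuckets through the paginator,
+// the recommended path given the quota note above. Assumes ctx, a configured
+// client, and the aws helper package are in scope; output handling is an
+// assumption.
+//
+//	p := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, b := range page.Buckets {
+//			fmt.Println(aws.ToString(b.Name))
+//		}
+//	}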
+
+func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListBuckets",
+ }
+}
+
+func addListBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: nopGetBucketAccessor,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: false,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go
new file mode 100644
index 000000000..d8d85e9d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go
@@ -0,0 +1,331 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of all Amazon S3 directory buckets owned by the authenticated
+// sender of the request. For more information about directory buckets, see [Directory buckets] in the
+// Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in
+// an IAM identity-based policy instead of a bucket policy. Cross-account access to
+// this API operation isn't supported. This operation can only be performed by the
+// Amazon Web Services account that owns the resource. For more information about
+// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// The BucketRegion response element is not part of the ListDirectoryBuckets
+// Response Syntax.
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
+ if params == nil {
+ params = &ListDirectoryBucketsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListDirectoryBucketsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListDirectoryBucketsInput struct {
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued on
+ // buckets in this account with a token. ContinuationToken is obfuscated and is
+ // not a real bucket name. You can use this ContinuationToken for the pagination
+ // of the list results.
+ ContinuationToken *string
+
+ // Maximum number of buckets to be returned in the response. When this number
+ // exceeds the count of buckets owned by the Amazon Web Services account, all
+ // the buckets are returned in the response.
+ MaxDirectoryBuckets *int32
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListDirectoryBucketsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type ListDirectoryBucketsOutput struct {
+
+ // The list of buckets owned by the requester.
+ Buckets []types.Bucket
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ // You can use the returned ContinuationToken for pagination of the list response.
+ ContinuationToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListDirectoryBuckets{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDirectoryBuckets{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListDirectoryBuckets"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListDirectoryBucketsPaginatorOptions is the paginator options for
+// ListDirectoryBuckets
+type ListDirectoryBucketsPaginatorOptions struct {
+ // Maximum number of buckets to be returned in the response. When this number
+ // exceeds the count of buckets owned by the Amazon Web Services account, all
+ // the buckets are returned in the response.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListDirectoryBucketsPaginator is a paginator for ListDirectoryBuckets
+type ListDirectoryBucketsPaginator struct {
+ options ListDirectoryBucketsPaginatorOptions
+ client ListDirectoryBucketsAPIClient
+ params *ListDirectoryBucketsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListDirectoryBucketsPaginator returns a new ListDirectoryBucketsPaginator
+func NewListDirectoryBucketsPaginator(client ListDirectoryBucketsAPIClient, params *ListDirectoryBucketsInput, optFns ...func(*ListDirectoryBucketsPaginatorOptions)) *ListDirectoryBucketsPaginator {
+ if params == nil {
+ params = &ListDirectoryBucketsInput{}
+ }
+
+ options := ListDirectoryBucketsPaginatorOptions{}
+ if params.MaxDirectoryBuckets != nil {
+ options.Limit = *params.MaxDirectoryBuckets
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListDirectoryBucketsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ContinuationToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListDirectoryBucketsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListDirectoryBuckets page.
+func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ContinuationToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxDirectoryBuckets = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListDirectoryBuckets(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.ContinuationToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListDirectoryBucketsAPIClient is a client that implements the
+// ListDirectoryBuckets operation.
+type ListDirectoryBucketsAPIClient interface {
+ ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error)
+}
+
+var _ ListDirectoryBucketsAPIClient = (*Client)(nil)
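+
+// Example (illustrative sketch): paging directory buckets with an explicit
+// page size set through the paginator options; the limit value and output
+// handling are placeholders.
+//
+//	p := s3.NewListDirectoryBucketsPaginator(client, &s3.ListDirectoryBucketsInput{},
+//		func(o *s3.ListDirectoryBucketsPaginatorOptions) {
+//			o.Limit = 100 // sent as MaxDirectoryBuckets on each request
+//		})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, b := range page.Buckets {
+//			fmt.Println(aws.ToString(b.Name))
+//		}
+//	}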
+
+func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListDirectoryBuckets",
+ }
+}
+
+func addListDirectoryBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: nopGetBucketAccessor,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
new file mode 100644
index 000000000..7d5dd343e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go
@@ -0,0 +1,510 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation lists in-progress multipart uploads in a bucket. An in-progress
+// multipart upload is a multipart upload that has been initiated by the
+// CreateMultipartUpload request, but has not yet been completed or aborted.
+//
+// Directory buckets - If multipart uploads in a directory bucket are in progress,
+// you can't delete the bucket until all the in-progress multipart uploads are
+// aborted or completed. To delete these in-progress multipart uploads, use the
+// ListMultipartUploads operation to list the in-progress multipart uploads in the
+// bucket and use the AbortMultipartUpload operation to abort all the in-progress
+// multipart uploads.
+//
+// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads
+// in the response. The limit of 1,000 multipart uploads is also the default value.
+// You can further limit the number of uploads in a response by specifying the
+// max-uploads request parameter. If there are more than 1,000 multipart uploads
+// that satisfy your ListMultipartUploads request, the response returns an
+// IsTruncated element with the value of true, a NextKeyMarker element, and a
+// NextUploadIdMarker element. To list the remaining multipart uploads, you need to
+// make subsequent ListMultipartUploads requests. In these requests, include two
+// query parameters: key-marker and upload-id-marker. Set the value of key-marker
+// to the NextKeyMarker value from the previous response. Similarly, set the value
+// of upload-id-marker to the NextUploadIdMarker value from the previous response.
+//
+// Directory buckets - The upload-id-marker element and the NextUploadIdMarker
+// element aren't supported by directory buckets. To list the additional multipart
+// uploads, you only need to set the value of key-marker to the NextKeyMarker
+// value from the previous response.
+//
+// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create the session and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// Sorting of multipart uploads in response
+//
+// - General purpose bucket - In the ListMultipartUploads response, the multipart
+// uploads are sorted based on two criteria:
+//
+// - Key-based sorting - Multipart uploads are initially sorted in ascending
+// order based on their object keys.
+//
+// - Time-based sorting - For uploads that share the same object key, they are
+// further sorted in ascending order based on the upload initiation time. Among
+// uploads with the same key, the one that was initiated first will appear before
+// the ones that were initiated later.
+//
+// - Directory bucket - In the ListMultipartUploads response, the multipart
+// uploads aren't sorted lexicographically based on the object keys.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to ListMultipartUploads :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [ListParts]
+//
+// [AbortMultipartUpload]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
+ if params == nil {
+ params = &ListMultipartUploadsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListMultipartUploads", params, optFns, c.addOperationListMultipartUploadsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListMultipartUploadsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListMultipartUploadsInput struct {
+
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Character you use to group keys.
+ //
+ // All keys that contain the same string between the prefix, if specified, and the
+ // first occurrence of the delimiter after the prefix are grouped under a single
+ // result element, CommonPrefixes . If you don't specify the prefix parameter, then
+ // the substring starts at the beginning of the key. The keys that are grouped
+ // under CommonPrefixes result element are not returned elsewhere in the response.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are
+ // encoded only in UTF-8. An object key can contain any Unicode character. However,
+ // the XML 1.0 parser can't parse certain characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response. For more information about characters to avoid in object key names,
+ // see [Object key naming guidelines].
+ //
+ // When using the URL encoding type, non-ASCII characters that are used in an
+ // object's key name will be percent-encoded according to UTF-8 code values. For
+ // example, the object test_file(3).png will appear as test_file%283%29.png .
+ //
+ // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines
+ // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
+ EncodingType types.EncodingType
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Specifies the multipart upload after which listing should begin.
+ //
+ // - General purpose buckets - For general purpose buckets, key-marker is an
+ // object key. Together with upload-id-marker , this parameter specifies the
+ // multipart upload after which listing should begin.
+ //
+ // If upload-id-marker is not specified, only the keys lexicographically greater
+ // than the specified key-marker will be included in the list.
+ //
+ // If upload-id-marker is specified, any multipart uploads for a key equal to the
+ // key-marker might also be included, provided those multipart uploads have
+ // upload IDs lexicographically greater than the specified upload-id-marker .
+ //
+ // - Directory buckets - For directory buckets, key-marker is obfuscated and
+ // isn't a real object key. The upload-id-marker parameter isn't supported by
+ // directory buckets. To list the additional multipart uploads, you only need to
+ // set the value of key-marker to the NextKeyMarker value from the previous
+ // response.
+ //
+ // In the ListMultipartUploads response, the multipart uploads aren't sorted
+ // lexicographically based on the object keys.
+ KeyMarker *string
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the
+ // response body. 1,000 is the maximum number of uploads that can be returned in a
+ // response.
+ MaxUploads *int32
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+	// prefix. You can use prefixes to separate a bucket into different groupings of
+ // keys. (You can think of using prefix to make groups in the same way that you'd
+ // use a folder in a file system.)
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
+ Prefix *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter is
+ // ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker .
+ //
+ // This functionality is not supported for directory buckets.
+ UploadIdMarker *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Prefix = in.Prefix
+
+}
+
+type ListMultipartUploadsOutput struct {
+
+ // The name of the bucket to which the multipart upload was initiated. Does not
+ // return the access point ARN or access point alias if used.
+ Bucket *string
+
+ // If you specify a delimiter in the request, then the result returns each
+ // distinct key prefix containing the delimiter in a CommonPrefixes element. The
+ // distinct key prefixes are returned in the Prefix child element.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
+ CommonPrefixes []types.CommonPrefix
+
+ // Contains the delimiter you specified in the request. If you don't specify a
+ // delimiter in your request, this element is absent from the response.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key .
+ EncodingType types.EncodingType
+
+ // Indicates whether the returned list of multipart uploads is truncated. A value
+ // of true indicates that the list was truncated. The list can be truncated if the
+ // number of multipart uploads exceeds the limit allowed or specified by max
+ // uploads.
+ IsTruncated *bool
+
+ // The key at or after which the listing began.
+ KeyMarker *string
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads *int32
+
+ // When a list is truncated, this element specifies the value that should be used
+ // for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string
+
+ // When a list is truncated, this element specifies the value that should be used
+ // for the upload-id-marker request parameter in a subsequent request.
+ //
+ // This functionality is not supported for directory buckets.
+ NextUploadIdMarker *string
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
+ Prefix *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter is
+ // ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker .
+ //
+ // This functionality is not supported for directory buckets.
+ UploadIdMarker *string
+
+ // Container for elements related to a particular multipart upload. A response can
+ // contain zero or more Upload elements.
+ Uploads []types.MultipartUpload
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListMultipartUploads{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListMultipartUploads"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMultipartUploads(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListMultipartUploadsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListMultipartUploads",
+ }
+}
+
+// getListMultipartUploadsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListMultipartUploadsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListMultipartUploadsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListMultipartUploadsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListMultipartUploadsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
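
For reviewers unfamiliar with the key-marker pagination described in this file's doc comment, here is a minimal, hand-written sketch of how a caller would drain all in-progress uploads with this operation. It is illustrative only, not part of the vendored code: the bucket name is hypothetical, and the client setup assumes default credential resolution via `config.LoadDefaultConfig`.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Follow NextKeyMarker/NextUploadIdMarker while IsTruncated is true,
	// as the operation documentation above describes.
	input := &s3.ListMultipartUploadsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical bucket name
	}
	for {
		out, err := client.ListMultipartUploads(context.TODO(), input)
		if err != nil {
			log.Fatal(err)
		}
		for _, u := range out.Uploads {
			fmt.Printf("in progress: key=%s uploadId=%s\n",
				aws.ToString(u.Key), aws.ToString(u.UploadId))
		}
		if !aws.ToBool(out.IsTruncated) {
			break
		}
		input.KeyMarker = out.NextKeyMarker
		input.UploadIdMarker = out.NextUploadIdMarker
	}
}
```

Note that for directory buckets only `key-marker` is honored, per the doc comment, so a directory-bucket caller would leave `UploadIdMarker` unset.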
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
new file mode 100644
index 000000000..6cfc98628
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go
@@ -0,0 +1,376 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns metadata about all versions of the objects in a bucket. You can also
+// use request parameters as selection criteria to return metadata about a subset
+// of all the object versions.
+//
+// To use this operation, you must have permission to perform the
+// s3:ListBucketVersions action. Be aware of the name difference.
+//
+// A 200 OK response can contain valid or invalid XML. Make sure to design your
+// application to parse the contents of the response and handle it appropriately.
+//
+// To use this operation, you must have READ access to the bucket.
+//
+// The following operations are related to ListObjectVersions :
+//
+// [ListObjectsV2]
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [DeleteObject]
+//
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
+func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) {
+ if params == nil {
+ params = &ListObjectVersionsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListObjectVersions", params, optFns, c.addOperationListObjectVersionsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListObjectVersionsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListObjectVersionsInput struct {
+
+ // The bucket name that contains the objects.
+ //
+ // This member is required.
+ Bucket *string
+
+ // A delimiter is a character that you specify to group keys. All keys that
+ // contain the same string between the prefix and the first occurrence of the
+ // delimiter are grouped under a single result element in CommonPrefixes . These
+ // groups are counted as one result against the max-keys limitation. These keys
+ // are not returned elsewhere in the response.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are
+ // encoded only in UTF-8. An object key can contain any Unicode character. However,
+ // the XML 1.0 parser can't parse certain characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response. For more information about characters to avoid in object key names,
+ // see [Object key naming guidelines].
+ //
+ // When using the URL encoding type, non-ASCII characters that are used in an
+ // object's key name will be percent-encoded according to UTF-8 code values. For
+ // example, the object test_file(3).png will appear as test_file%283%29.png .
+ //
+ // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines
+ // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
+ EncodingType types.EncodingType
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string
+
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys but
+ // will never contain more. If additional keys satisfy the search criteria, but
+	// were not returned because max-keys was exceeded, the response contains
+	// IsTruncated set to true . To return the additional keys, see key-marker and
+	// version-id-marker .
+ MaxKeys *int32
+
+ // Specifies the optional fields that you want returned in the response. Fields
+ // that you do not specify are not returned.
+ OptionalObjectAttributes []types.OptionalObjectAttributes
+
+ // Use this parameter to select only those keys that begin with the specified
+ // prefix. You can use prefixes to separate a bucket into different groupings of
+ // keys. (You can think of using prefix to make groups in the same way that you'd
+ // use a folder in a file system.) You can use prefix with delimiter to roll up
+ // numerous objects into a single result under CommonPrefixes .
+ Prefix *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Prefix = in.Prefix
+
+}
+
+type ListObjectVersionsOutput struct {
+
+ // All of the keys rolled up into a common prefix count as a single return when
+ // calculating the number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Container for an object that is a delete marker. To learn more about delete
+ // markers, see [Working with delete markers].
+ //
+ // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html
+ DeleteMarkers []types.DeleteMarkerEntry
+
+ // The delimiter grouping the included keys. A delimiter is a character that you
+ // specify to group keys. All keys that contain the same string between the prefix
+ // and the first occurrence of the delimiter are grouped under a single result
+ // element in CommonPrefixes . These groups are counted as one result against the
+ // max-keys limitation. These keys are not returned elsewhere in the response.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter .
+ EncodingType types.EncodingType
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria. If your results were truncated, you can make a
+ // follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the rest of
+ // the results.
+ IsTruncated *bool
+
+ // Marks the last key returned in a truncated response.
+ KeyMarker *string
+
+ // Specifies the maximum number of objects to return.
+ MaxKeys *int32
+
+ // The bucket name.
+ Name *string
+
+ // When the number of responses exceeds the value of MaxKeys , NextKeyMarker
+ // specifies the first key not returned that satisfies the search criteria. Use
+ // this value for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string
+
+ // When the number of responses exceeds the value of MaxKeys , NextVersionIdMarker
+ // specifies the first object version not returned that satisfies the search
+ // criteria. Use this value for the version-id-marker request parameter in a
+ // subsequent request.
+ NextVersionIdMarker *string
+
+ // Selects objects that start with the value supplied by this parameter.
+ Prefix *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Marks the last version of the key returned in a truncated response.
+ VersionIdMarker *string
+
+ // Container for version information.
+ Versions []types.ObjectVersion
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectVersions{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectVersions"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectVersions(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListObjectVersionsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListObjectVersions",
+ }
+}
+
+// getListObjectVersionsBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getListObjectVersionsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectVersionsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListObjectVersionsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectVersionsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
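
The output struct above documents a two-part continuation token (`NextKeyMarker` plus `NextVersionIdMarker`). A minimal sketch of how a caller would walk every version and delete marker with it follows; the bucket name is hypothetical and the helper function is not part of the vendored code.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllVersions prints every object version and delete marker in a bucket,
// continuing with NextKeyMarker/NextVersionIdMarker while IsTruncated is true.
func listAllVersions(ctx context.Context, client *s3.Client, bucket string) error {
	input := &s3.ListObjectVersionsInput{Bucket: aws.String(bucket)}
	for {
		out, err := client.ListObjectVersions(ctx, input)
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			fmt.Printf("version: key=%s id=%s\n",
				aws.ToString(v.Key), aws.ToString(v.VersionId))
		}
		for _, d := range out.DeleteMarkers {
			fmt.Printf("delete marker: key=%s id=%s\n",
				aws.ToString(d.Key), aws.ToString(d.VersionId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		input.KeyMarker = out.NextKeyMarker
		input.VersionIdMarker = out.NextVersionIdMarker
	}
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Bucket name is hypothetical.
	if err := listAllVersions(context.TODO(), s3.NewFromConfig(cfg), "amzn-s3-demo-bucket"); err != nil {
		log.Fatal(err)
	}
}
```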
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
new file mode 100644
index 000000000..f56445886
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go
@@ -0,0 +1,399 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Returns some or all (up to 1,000) of the objects in a bucket. You can use the
+// request parameters as selection criteria to return a subset of the objects in a
+// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design
+// your application to parse the contents of the response and handle it
+// appropriately.
+//
+// This action has been revised. We recommend that you use the newer version, [ListObjectsV2],
+// when developing applications. For backward compatibility, Amazon S3 continues to
+// support ListObjects .
+//
+// The following operations are related to ListObjects :
+//
+// [ListObjectsV2]
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [CreateBucket]
+//
+// [ListBuckets]
+//
+// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
+func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) {
+ if params == nil {
+ params = &ListObjectsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListObjects", params, optFns, c.addOperationListObjectsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListObjectsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListObjectsInput struct {
+
+ // The name of the bucket containing the objects.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // A delimiter is a character that you use to group keys.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are
+ // encoded only in UTF-8. An object key can contain any Unicode character. However,
+ // the XML 1.0 parser can't parse certain characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response. For more information about characters to avoid in object key names,
+ // see [Object key naming guidelines].
+ //
+ // When using the URL encoding type, non-ASCII characters that are used in an
+ // object's key name will be percent-encoded according to UTF-8 code values. For
+ // example, the object test_file(3).png will appear as test_file%283%29.png .
+ //
+ // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines
+ // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
+ EncodingType types.EncodingType
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. Marker can be any key in the bucket.
+ Marker *string
+
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys but
+ // will never contain more.
+ MaxKeys *int32
+
+ // Specifies the optional fields that you want returned in the response. Fields
+ // that you do not specify are not returned.
+ OptionalObjectAttributes []types.OptionalObjectAttributes
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string
+
+	// Confirms that the requester knows that they will be charged for the list
+ // objects request. Bucket owners need not specify this parameter in their
+ // requests.
+ RequestPayer types.RequestPayer
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Prefix = in.Prefix
+
+}
+
+type ListObjectsOutput struct {
+
+ // All of the keys (up to 1,000) rolled up in a common prefix count as a single
+ // return when calculating the number of returns.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the next
+ // occurrence of the string specified by the delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory
+ // specified by Prefix .
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in
+ // notes/summer/july , the common prefix is notes/summer/ . All of the keys that
+ // roll up into a common prefix count as a single return when calculating the
+ // number of returns.
+ CommonPrefixes []types.CommonPrefix
+
+ // Metadata about each object returned.
+ Contents []types.Object
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element in the
+ // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
+ // the response. Each rolled-up result counts as only one return against the
+ // MaxKeys value.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are
+ // encoded only in UTF-8. An object key can contain any Unicode character. However,
+ // the XML 1.0 parser can't parse certain characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response. For more information about characters to avoid in object key names,
+ // see [Object key naming guidelines].
+ //
+ // When using the URL encoding type, non-ASCII characters that are used in an
+ // object's key name will be percent-encoded according to UTF-8 code values. For
+ // example, the object test_file(3).png will appear as test_file%283%29.png .
+ //
+ // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines
+ // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
+ EncodingType types.EncodingType
+
+ // A flag that indicates whether Amazon S3 returned all of the results that
+ // satisfied the search criteria.
+ IsTruncated *bool
+
+ // Indicates where in the bucket listing begins. Marker is included in the
+ // response if it was sent with the request.
+ Marker *string
+
+ // The maximum number of keys returned in the response body.
+ MaxKeys *int32
+
+ // The bucket name.
+ Name *string
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true ), you can use the key name in this field as the marker parameter in
+ // the subsequent request to get the next set of objects. Amazon S3 lists objects
+ // in alphabetical order.
+ //
+ // This element is returned only if you have the delimiter request parameter
+ // specified. If the response does not include the NextMarker element and it is
+ // truncated, you can use the value of the last Key element in the response as the
+ // marker parameter in the subsequent request to get the next set of object keys.
+ NextMarker *string
+
+ // Keys that begin with the indicated prefix.
+ Prefix *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjects{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjects"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListObjectsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjects(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListObjectsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *ListObjectsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListObjects",
+ }
+}
+
+// getListObjectsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getListObjectsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListObjectsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
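
The legacy `ListObjects` pagination has a wrinkle the output documentation above calls out: `NextMarker` is only returned when a delimiter is specified, and otherwise the last `Key` in `Contents` serves as the next marker. A minimal sketch of that loop, with a hypothetical bucket name and default client setup, looks like this:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	input := &s3.ListObjectsInput{
		Bucket:    aws.String("amzn-s3-demo-bucket"), // hypothetical bucket name
		Delimiter: aws.String("/"),
	}
	for {
		out, err := client.ListObjects(context.TODO(), input)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range out.Contents {
			fmt.Println(aws.ToString(obj.Key))
		}
		if !aws.ToBool(out.IsTruncated) {
			break
		}
		// NextMarker is only present when a delimiter was specified; fall
		// back to the last Key in Contents, as documented above.
		if out.NextMarker != nil {
			input.Marker = out.NextMarker
		} else if n := len(out.Contents); n > 0 {
			input.Marker = out.Contents[n-1].Key
		} else {
			break
		}
	}
}
```

New applications should prefer `ListObjectsV2` (added in the next file), which replaces this marker scheme with an opaque `ContinuationToken`.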
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
new file mode 100644
index 000000000..f10ae67d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go
@@ -0,0 +1,591 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns some or all (up to 1,000) of the objects in a bucket with each request.
+// You can use the request parameters as selection criteria to return a subset of
+// the objects in a bucket. A 200 OK response can contain valid or invalid XML.
+// Make sure to design your application to parse the contents of the response and
+// handle it appropriately. For more information about listing objects, see [Listing object keys programmatically]in the
+// Amazon S3 User Guide. To get a list of your buckets, see [ListBuckets].
+//
+// - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't
+// return prefixes that are related only to in-progress multipart uploads.
+//
+// - Directory buckets - For directory buckets, ListObjectsV2 response includes
+// the prefixes that are related only to in-progress multipart uploads.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use this operation, you must have
+// READ access to the bucket. You must have permission to perform the
+// s3:ListBucket action. The bucket owner has this permission by default and can
+// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create the session and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession]CreateSession .
+//
+// Sorting order of returned objects
+//
+// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns
+// objects in lexicographical order based on their key names.
+//
+// - Directory bucket - For directory buckets, ListObjectsV2 does not return
+// objects in lexicographical order.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// This section describes the latest revision of this action. We recommend that
+// you use this revised API operation for application development. For backward
+// compatibility, Amazon S3 continues to support the prior version of this API
+// operation, [ListObjects].
+//
+// The following operations are related to ListObjectsV2 :
+//
+// [GetObject]
+//
+// [PutObject]
+//
+// [CreateBucket]
+//
+// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html
+// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+ if params == nil {
+ params = &ListObjectsV2Input{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListObjectsV2", params, optFns, c.addOperationListObjectsV2Middlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListObjectsV2Output)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListObjectsV2Input struct {
+
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued on
+ // this bucket with a token. ContinuationToken is obfuscated and is not a real
+ // key. You can use this ContinuationToken for pagination of the list results.
+ ContinuationToken *string
+
+ // A delimiter is a character that you use to group keys.
+ //
+ // - Directory buckets - For directory buckets, / is the only supported delimiter.
+ //
+ // - Directory buckets - When you query ListObjectsV2 with a delimiter during
+ // in-progress multipart uploads, the CommonPrefixes response parameter contains
+ // the prefixes that are associated with the in-progress multipart uploads. For
+ // more information about multipart uploads, see [Multipart Upload Overview] in the Amazon S3 User Guide.
+ //
+ // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are
+ // encoded only in UTF-8. An object key can contain any Unicode character. However,
+ // the XML 1.0 parser can't parse certain characters, such as characters with an
+ // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you
+ // can add this parameter to request that Amazon S3 encode the keys in the
+ // response. For more information about characters to avoid in object key names,
+ // see [Object key naming guidelines].
+ //
+ // When using the URL encoding type, non-ASCII characters that are used in an
+ // object's key name will be percent-encoded according to UTF-8 code values. For
+ // example, the object test_file(3).png will appear as test_file%283%29.png .
+ //
+ // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines
+ // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
+ EncodingType types.EncodingType
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The owner field is not present in ListObjectsV2 by default. If you want to
+ // return the owner field with each key in the result, then set the FetchOwner
+ // field to true .
+ //
+ // Directory buckets - For directory buckets, the bucket owner is returned as the
+ // object owner for all objects.
+ FetchOwner *bool
+
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys but
+ // will never contain more.
+ MaxKeys *int32
+
+ // Specifies the optional fields that you want returned in the response. Fields
+ // that you do not specify are not returned.
+ //
+ // This functionality is not supported for directory buckets.
+ OptionalObjectAttributes []types.OptionalObjectAttributes
+
+ // Limits the response to keys that begin with the specified prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
+ Prefix *string
+
+ // Confirms that the requester knows that they will be charged for the V2-style
+ // list objects request. Bucket owners need not specify this parameter in their
+ // requests.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestPayer types.RequestPayer
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
+ StartAfter *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Prefix = in.Prefix
+
+}
+
+type ListObjectsV2Output struct {
+
+ // All of the keys (up to 1,000) that share the same prefix are grouped together.
+ // When counting the total number of returns by this API operation, this group of
+ // keys is considered as one item.
+ //
+ // A response can contain CommonPrefixes only if you specify a delimiter.
+ //
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the next
+ // occurrence of the string specified by a delimiter.
+ //
+ // CommonPrefixes lists keys that act like subdirectories in the directory
+ // specified by Prefix .
+ //
+ // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in
+ // notes/summer/july , the common prefix is notes/summer/ . All of the keys that
+ // roll up into a common prefix count as a single return when calculating the
+ // number of returns.
+ //
+ // - Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
+ //
+ // - Directory buckets - When you query ListObjectsV2 with a delimiter during
+ // in-progress multipart uploads, the CommonPrefixes response parameter contains
+ // the prefixes that are associated with the in-progress multipart uploads. For
+ // more information about multipart uploads, see [Multipart Upload Overview] in the Amazon S3 User Guide.
+ //
+ // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+ CommonPrefixes []types.CommonPrefix
+
+ // Metadata about each object returned.
+ Contents []types.Object
+
+ // If ContinuationToken was sent with the request, it is included in the
+ // response. You can use the returned ContinuationToken for pagination of the
+ // list results.
+ ContinuationToken *string
+
+ // Causes keys that contain the same string between the prefix and the first
+ // occurrence of the delimiter to be rolled up into a single result element in the
+ // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in
+ // the response. Each rolled-up result counts as only one return against the
+ // MaxKeys value.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
+ Delimiter *string
+
+ // Encoding type used by Amazon S3 to encode object key names in the XML response.
+ //
+ // If you specify the encoding-type request parameter, Amazon S3 includes this
+ // element in the response, and returns encoded key name values in the following
+ // response elements:
+ //
+ // Delimiter, Prefix, Key, and StartAfter .
+ EncodingType types.EncodingType
+
+ // Set to false if all of the results were returned. Set to true if more keys are
+ // available to return. If the number of results exceeds that specified by MaxKeys
+ // , all of the results might not be returned.
+ IsTruncated *bool
+
+ // KeyCount is the number of keys returned with this request. KeyCount will always
+ // be less than or equal to the MaxKeys field. For example, if you ask for 50
+ // keys, your result will include 50 keys or fewer.
+ KeyCount *int32
+
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys but
+ // will never contain more.
+ MaxKeys *int32
+
+ // The bucket name.
+ Name *string
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there are
+ // more keys in the bucket that can be listed. The next list request to Amazon S3
+ // can be continued with this NextContinuationToken . NextContinuationToken is
+ // obfuscated and is not a real key.
+ NextContinuationToken *string
+
+ // Keys that begin with the indicated prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a
+ // delimiter ( / ) are supported.
+ Prefix *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If StartAfter was sent with the request, it is included in the response.
+ //
+ // This functionality is not supported for directory buckets.
+ StartAfter *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListObjectsV2{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectsV2"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListObjectsV2(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2
+type ListObjectsV2PaginatorOptions struct {
+ // Sets the maximum number of keys returned in the response. By default, the
+ // action returns up to 1,000 key names. The response might contain fewer keys but
+ // will never contain more.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListObjectsV2Paginator is a paginator for ListObjectsV2
+type ListObjectsV2Paginator struct {
+ options ListObjectsV2PaginatorOptions
+ client ListObjectsV2APIClient
+ params *ListObjectsV2Input
+ nextToken *string
+ firstPage bool
+}
+
+// NewListObjectsV2Paginator returns a new ListObjectsV2Paginator
+func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObjectsV2Input, optFns ...func(*ListObjectsV2PaginatorOptions)) *ListObjectsV2Paginator {
+ if params == nil {
+ params = &ListObjectsV2Input{}
+ }
+
+ options := ListObjectsV2PaginatorOptions{}
+ if params.MaxKeys != nil {
+ options.Limit = *params.MaxKeys
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListObjectsV2Paginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ContinuationToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListObjectsV2Paginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListObjectsV2 page.
+func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Output, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ContinuationToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxKeys = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = nil
+ if result.IsTruncated != nil && *result.IsTruncated {
+ p.nextToken = result.NextContinuationToken
+ }
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+func (v *ListObjectsV2Input) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation.
+type ListObjectsV2APIClient interface {
+ ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error)
+}
+
+var _ ListObjectsV2APIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListObjectsV2",
+ }
+}
+
+// getListObjectsV2BucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getListObjectsV2BucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListObjectsV2Input)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListObjectsV2UpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListObjectsV2BucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
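+
+// listAllKeys is an illustrative sketch added alongside the generated code; it
+// is not part of the Smithy-generated API surface, and its name and signature
+// are assumptions chosen for this example. It shows the intended use of
+// ListObjectsV2Paginator: walk every page for a bucket/prefix and collect the
+// object keys.
+func listAllKeys(ctx context.Context, client ListObjectsV2APIClient, bucket, prefix string) ([]string, error) {
+	p := NewListObjectsV2Paginator(client, &ListObjectsV2Input{
+		Bucket: &bucket,
+		Prefix: &prefix,
+	})
+	var keys []string
+	for p.HasMorePages() {
+		// NextPage re-sends the request with the ContinuationToken from the
+		// previous response until IsTruncated is no longer true.
+		page, err := p.NextPage(ctx)
+		if err != nil {
+			return nil, err
+		}
+		for _, obj := range page.Contents {
+			if obj.Key != nil {
+				keys = append(keys, *obj.Key)
+			}
+		}
+	}
+	return keys, nil
+}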
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
new file mode 100644
index 000000000..d30650e42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go
@@ -0,0 +1,569 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Lists the parts that have been uploaded for a specific multipart upload.
+//
+// To use this operation, you must provide the upload ID in the request. You
+// obtain this upload ID by sending the initiate multipart upload request through [CreateMultipartUpload].
+//
+// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of
+// 1,000 parts is also the default value. You can restrict the number of parts in a
+// response by specifying the max-parts request parameter. If your multipart
+// upload consists of more than 1,000 parts, the response returns an IsTruncated
+// field with the value of true , and a NextPartNumberMarker element. To list
+// remaining uploaded parts, in subsequent ListParts requests, include the
+// part-number-marker query string parameter and set its value to the
+// NextPartNumberMarker field value from the previous response.
+//
+// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Permissions
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see [Multipart Upload and Permissions] in the Amazon S3 User Guide.
+//
+// If the upload was created using server-side encryption with Key Management
+// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon
+// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt
+// action for the ListParts request to succeed.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession].
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to ListParts :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [GetObjectAttributes]
+//
+// [ListMultipartUploads]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) {
+ if params == nil {
+ params = &ListPartsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListParts", params, optFns, c.addOperationListPartsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListPartsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListPartsInput struct {
+
+ // The name of the bucket to which the parts are being uploaded.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ //
+ // This member is required.
+ UploadId *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int32
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The server-side encryption (SSE) algorithm used to encrypt the object. This
+ // parameter is needed only when the object was created using a checksum algorithm.
+ // For more information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerAlgorithm *string
+
+ // The server-side encryption (SSE) customer managed key. This parameter is needed
+ // only when the object was created using a checksum algorithm. For more
+ // information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKey *string
+
+ // The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ // needed only when the object was created using a checksum algorithm. For more
+ // information, see [Protecting data using SSE-C keys] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type ListPartsOutput struct {
+
+ // If the bucket has a lifecycle rule configured with an action to abort
+ // incomplete multipart uploads and the prefix in the lifecycle rule matches the
+ // object name in the request, then the response includes this header indicating
+ // when the initiated multipart upload becomes eligible for the abort operation.
+ // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration].
+ //
+ // The response will also include the x-amz-abort-rule-id header that will provide
+ // the ID of the lifecycle configuration rule that defines this action.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+ AbortDate *time.Time
+
+ // This header is returned along with the x-amz-abort-date header. It identifies
+ // the applicable lifecycle configuration rule that defines the action to abort
+ // incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
+ AbortRuleId *string
+
+ // The name of the bucket to which the multipart upload was initiated. Does not
+ // return the access point ARN or access point alias if used.
+ Bucket *string
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The checksum type, which determines how part-level checksums are combined to
+ // create an object-level checksum for multipart objects. You can use this header
+ // response to verify that the checksum type that is received is the same checksum
+ // type that was specified in the CreateMultipartUpload request. For more
+ // information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Container element that identifies who initiated the multipart upload. If the
+ // initiator is an Amazon Web Services account, this element provides the same
+ // information as the Owner element. If the initiator is an IAM User, this element
+ // provides the user ARN and display name.
+ Initiator *types.Initiator
+
+ // Indicates whether the returned list of parts is truncated. A true value
+ // indicates that the list was truncated. A list can be truncated if the number of
+ // parts exceeds the limit returned in the MaxParts element.
+ IsTruncated *bool
+
+ // Object key for which the multipart upload was initiated.
+ Key *string
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int32
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the part-number-marker request parameter in a
+ // subsequent request.
+ NextPartNumberMarker *string
+
+ // Container element that identifies the object owner, after the object is
+ // created. If multipart upload is initiated by an IAM user, this element provides
+ // the parent account ID and display name.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for all
+ // the parts.
+ Owner *types.Owner
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *string
+
+ // Container for elements related to a particular part. A response can contain
+ // zero or more Part elements.
+ Parts []types.Part
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // The class of storage used to store the uploaded object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
+ StorageClass types.StorageClass
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpListParts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListParts"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListPartsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListParts(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addListPartsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListPartsPaginatorOptions is the paginator options for ListParts
+type ListPartsPaginatorOptions struct {
+ // Sets the maximum number of parts to return.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListPartsPaginator is a paginator for ListParts
+type ListPartsPaginator struct {
+ options ListPartsPaginatorOptions
+ client ListPartsAPIClient
+ params *ListPartsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListPartsPaginator returns a new ListPartsPaginator
+func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, optFns ...func(*ListPartsPaginatorOptions)) *ListPartsPaginator {
+ if params == nil {
+ params = &ListPartsInput{}
+ }
+
+ options := ListPartsPaginatorOptions{}
+ if params.MaxParts != nil {
+ options.Limit = *params.MaxParts
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListPartsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.PartNumberMarker,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListPartsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListParts page.
+func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.PartNumberMarker = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxParts = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListParts(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = nil
+ if result.IsTruncated != nil && *result.IsTruncated {
+ p.nextToken = result.NextPartNumberMarker
+ }
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+func (v *ListPartsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+// ListPartsAPIClient is a client that implements the ListParts operation.
+type ListPartsAPIClient interface {
+ ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error)
+}
+
+var _ ListPartsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListParts",
+ }
+}
+
+// getListPartsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getListPartsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*ListPartsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addListPartsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getListPartsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
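+
+// listAllParts is an illustrative sketch added alongside the generated code;
+// it is not part of the Smithy-generated API surface, and its name and
+// signature are assumptions chosen for this example. It shows the intended use
+// of ListPartsPaginator: page through an in-progress multipart upload via
+// NextPartNumberMarker and collect all uploaded parts.
+func listAllParts(ctx context.Context, client ListPartsAPIClient, bucket, key, uploadID string) ([]types.Part, error) {
+	p := NewListPartsPaginator(client, &ListPartsInput{
+		Bucket:   &bucket,
+		Key:      &key,
+		UploadId: &uploadID,
+	})
+	var parts []types.Part
+	for p.HasMorePages() {
+		// Each page holds up to MaxParts (default 1,000) Part elements.
+		page, err := p.NextPage(ctx)
+		if err != nil {
+			return nil, err
+		}
+		parts = append(parts, page.Parts...)
+	}
+	return parts, nil
+}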
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
new file mode 100644
index 000000000..51de19c80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go
@@ -0,0 +1,305 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
+// Acceleration is a bucket-level feature that enables you to perform faster data
+// transfers to Amazon S3.
+//
+// To use this operation, you must have permission to perform the
+// s3:PutAccelerateConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// The Transfer Acceleration state of a bucket can be set to one of the following
+// two values:
+//
+// - Enabled – Enables accelerated data transfers to the bucket.
+//
+// - Suspended – Disables accelerated data transfers to the bucket.
+//
+// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket.
+//
+// After setting the Transfer Acceleration state of a bucket to Enabled, it might
+// take up to thirty minutes before the data transfer rates to the bucket increase.
+//
+// The name of the bucket used for Transfer Acceleration must be DNS-compliant and
+// must not contain periods (".").
+//
+// For more information about transfer acceleration, see [Transfer Acceleration].
+//
+// The following operations are related to PutBucketAccelerateConfiguration :
+//
+// [GetBucketAccelerateConfiguration]
+//
+// [CreateBucket]
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketAccelerateConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAccelerateConfiguration", params, optFns, c.addOperationPutBucketAccelerateConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAccelerateConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketAccelerateConfigurationInput struct {
+
+ // Container for setting the transfer acceleration state.
+ //
+ // This member is required.
+ AccelerateConfiguration *types.AccelerateConfiguration
+
+ // The name of the bucket for which the accelerate configuration is set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketAccelerateConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAccelerateConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAccelerateConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketAccelerateConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketAccelerateConfiguration",
+ }
+}
+
+// getPutBucketAccelerateConfigurationRequestAlgorithmMember gets the request
+// checksum algorithm value provided as input.
+func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketAccelerateConfigurationInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember,
+ RequireChecksum: false,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketAccelerateConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketAccelerateConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketAccelerateConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketAccelerateConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketAccelerateConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
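+
+// setTransferAcceleration is an illustrative sketch added alongside the
+// generated code; it is not part of the Smithy-generated API surface, and its
+// name and signature are assumptions chosen for this example. It shows how a
+// caller toggles the Transfer Acceleration state described above by passing
+// one of the two supported status values.
+func setTransferAcceleration(ctx context.Context, c *Client, bucket string, status types.BucketAccelerateStatus) error {
+	_, err := c.PutBucketAccelerateConfiguration(ctx, &PutBucketAccelerateConfigurationInput{
+		Bucket: &bucket,
+		AccelerateConfiguration: &types.AccelerateConfiguration{
+			// types.BucketAccelerateStatusEnabled or
+			// types.BucketAccelerateStatusSuspended.
+			Status: status,
+		},
+	})
+	return err
+}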
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
new file mode 100644
index 000000000..a7d37a86b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go
@@ -0,0 +1,452 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the permissions on an existing bucket using access control lists (ACL).
+// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the
+// WRITE_ACP permission.
+//
+// You can use one of the following two ways to set a bucket's permissions:
+//
+// - Specify the ACL in the request body
+//
+// - Specify permissions using request headers
+//
+// You cannot specify access permission using both the body and the request
+// headers.
+//
+// Depending on your application needs, you may choose to set the ACL on a bucket
+// using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, then you
+// can continue to use that approach.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies to
+// grant access to your bucket and the objects in it. Requests to set ACLs or
+// update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see [Controlling object ownership] in the
+// Amazon S3 User Guide.
+//
+// Permissions You can set access permissions by using one of the following
+// methods:
+//
+// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
+// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
+// set of grantees and permissions. Specify the canned ACL name as the value of
+// x-amz-acl . If you use this header, you cannot use other access
+// control-specific headers in your request. For more information, see [Canned ACL].
+//
+// - Specify access permissions explicitly with the x-amz-grant-read ,
+// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
+// headers. When using these headers, you specify explicit access permissions and
+// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+// permission. If you use these ACL-specific headers, you cannot use the
+// x-amz-acl header to set a canned ACL. These parameters map to the set of
+// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview].
+//
+// You specify each grantee as a type=value pair, where the type is one of the
+// following:
+//
+// - id – if the value specified is the canonical user ID of an Amazon Web
+// Services account
+//
+// - uri – if you are granting permissions to a predefined group
+//
+// - emailAddress – if the value specified is the email address of an Amazon Web
+// Services account
+//
+// Using email addresses to specify a grantee is only supported in the following
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// [Regions and Endpoints] in the Amazon Web Services General Reference.
+//
+// For example, the following x-amz-grant-write header grants create, overwrite,
+// and delete objects permission to the LogDelivery group predefined by Amazon S3
+// and two Amazon Web Services accounts identified by their email addresses:
+//
+// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// id="111122223333", id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly. You
+// cannot do both.
+//
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (using request elements) in the following ways:
+//
+// - By the person's ID:
+//
+// <>ID<><>GranteesEmail<>
+//
+// DisplayName is optional and ignored in the request
+//
+// - By URI:
+//
+// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+//
+// - By Email address:
+//
+// <>Grantees@email.com<>&
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+//
+// acl request, appears as the CanonicalUser.
+//
+// Using email addresses to specify a grantee is only supported in the following
+//
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see
+// [Regions and Endpoints] in the Amazon Web Services General Reference.
+//
+// The following operations are related to PutBucketAcl :
+//
+// [CreateBucket]
+//
+// [DeleteBucket]
+//
+// [GetObjectAcl]
+//
+// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) {
+ if params == nil {
+ params = &PutBucketAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAcl", params, optFns, c.addOperationPutBucketAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
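For orientation while reviewing this vendored operation, here is a minimal, editor-supplied sketch of calling PutBucketAcl with a canned ACL; it is not part of the vendored file, and the bucket name is a placeholder. Note that a canned ACL and the explicit Grant* fields are mutually exclusive, as the documentation above states.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the default chain (env vars, shared config).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Apply a canned ACL; don't combine ACL with GrantRead/GrantWrite/etc.
	// "amzn-s3-demo-bucket" is a placeholder bucket name.
	if _, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		ACL:    types.BucketCannedACLPrivate,
	}); err != nil {
		log.Fatal(err)
	}
}
```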
+type PutBucketAclInput struct {
+
+ // The bucket to which to apply the ACL.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The canned ACL to apply to the bucket.
+ ACL types.BucketCannedACL
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *types.AccessControlPolicy
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to [RFC 1864.]
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string
+
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
+ GrantWrite *string
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketAclOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAcl"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketAclInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketAcl",
+ }
+}
+
+// getPutBucketAclRequestAlgorithmMember gets the request checksum algorithm value
+// provided as input.
+func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketAclInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketAclRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
new file mode 100644
index 000000000..8e13fbcda
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
@@ -0,0 +1,291 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID). You can have up to 1,000 analytics configurations per bucket.
+//
+// You can choose to have storage class analysis export analysis reports sent to a
+// comma-separated values (CSV) flat file. See the DataExport request element.
+// Reports are updated daily and are based on the object filters that you
+// configure. When selecting data export, you specify a destination bucket and an
+// optional destination prefix where the file is written. You can export the data
+// to a destination bucket in a different account. However, the destination bucket
+// must be in the same Region as the bucket on which you are setting the
+// analytics configuration. For more information, see [Amazon S3 Analytics – Storage Class Analysis].
+//
+// You must create a bucket policy on the destination bucket where the exported
+// file is written to grant permissions to Amazon S3 to write objects to the
+// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis].
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// PutBucketAnalyticsConfiguration has the following special errors:
+//
+// - HTTP Error: HTTP 400 Bad Request
+//
+// - Code: InvalidArgument
+//
+// - Cause: Invalid argument.
+//
+// - HTTP Error: HTTP 400 Bad Request
+//
+// - Code: TooManyConfigurations
+//
+// - Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// - HTTP Error: HTTP 403 Forbidden
+//
+// - Code: AccessDenied
+//
+// - Cause: You are not the owner of the specified bucket, or you do not have
+// the s3:PutAnalyticsConfiguration bucket permission to set the configuration on
+// the bucket.
+//
+// The following operations are related to PutBucketAnalyticsConfiguration :
+//
+// [GetBucketAnalyticsConfiguration]
+//
+// [DeleteBucketAnalyticsConfiguration]
+//
+// [ListBucketAnalyticsConfigurations]
+//
+// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html
+// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9
+// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html
+// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketAnalyticsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketAnalyticsConfiguration", params, optFns, c.addOperationPutBucketAnalyticsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketAnalyticsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
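An editor-supplied sketch of the request shape for this operation, assuming a *s3.Client built as in the earlier PutBucketAcl example. The bucket names, configuration ID, and prefix are placeholders, and the destination bucket must already grant Amazon S3 write permission as described above.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putAnalyticsExample stores a storage class analysis configuration that
// exports daily CSV reports to a destination bucket.
func putAnalyticsExample(ctx context.Context, client *s3.Client) error {
	cfg := &types.AnalyticsConfiguration{
		Id: aws.String("report-1"), // placeholder configuration ID
		StorageClassAnalysis: &types.StorageClassAnalysis{
			DataExport: &types.StorageClassAnalysisDataExport{
				OutputSchemaVersion: types.StorageClassAnalysisSchemaVersionV1,
				Destination: &types.AnalyticsExportDestination{
					S3BucketDestination: &types.AnalyticsS3BucketDestination{
						// The destination is given as a bucket ARN; it may live in
						// another account but must be in the same Region.
						Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination"),
						Format: types.AnalyticsS3ExportFileFormatCsv,
						Prefix: aws.String("analytics/"),
					},
				},
			},
		},
	}
	_, err := client.PutBucketAnalyticsConfiguration(ctx, &s3.PutBucketAnalyticsConfigurationInput{
		Bucket:                 aws.String("amzn-s3-demo-bucket"),
		Id:                     cfg.Id,
		AnalyticsConfiguration: cfg,
	})
	return err
}
```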
+type PutBucketAnalyticsConfigurationInput struct {
+
+ // The configuration and any analyses for the analytics filter.
+ //
+ // This member is required.
+ AnalyticsConfiguration *types.AnalyticsConfiguration
+
+// The name of the bucket for which an analytics configuration is stored.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketAnalyticsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAnalyticsConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketAnalyticsConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketAnalyticsConfiguration",
+ }
+}
+
+// getPutBucketAnalyticsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketAnalyticsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketAnalyticsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketAnalyticsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketAnalyticsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
new file mode 100644
index 000000000..653a1598a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
@@ -0,0 +1,330 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the cors configuration for your bucket. If the configuration exists,
+// Amazon S3 replaces it.
+//
+// To use this operation, you must be allowed to perform the s3:PutBucketCORS
+// action. By default, the bucket owner has this permission and can grant it to
+// others.
+//
+// You set this configuration on a bucket so that the bucket can service
+// cross-origin requests. For example, you might want to enable a request whose
+// origin is http://www.example.com to access your Amazon S3 bucket at
+// my.example.bucket.com by using the browser's XMLHttpRequest capability.
+//
+// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
+// subresource to the bucket. The cors subresource is an XML document in which you
+// configure rules that identify origins and the HTTP methods that can be executed
+// on your bucket. The document is limited to 64 KB in size.
+//
+// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS
+// request) against a bucket, it evaluates the cors configuration on the bucket
+// and uses the first CORSRule rule that matches the incoming browser request to
+// enable a cross-origin request. For a rule to match, the following conditions
+// must be met:
+//
+// - The request's Origin header must match AllowedOrigin elements.
+//
+// - The request method (for example, GET, PUT, HEAD, and so on) or the
+// Access-Control-Request-Method header in case of a pre-flight OPTIONS request
+// must be one of the AllowedMethod elements.
+//
+// - Every header specified in the Access-Control-Request-Headers request header
+// of a pre-flight request must match an AllowedHeader element.
+//
+// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide.
+//
+// The following operations are related to PutBucketCors :
+//
+// [GetBucketCors]
+//
+// [DeleteBucketCors]
+//
+// [RESTOPTIONSobject]
+//
+// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html
+// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html
+func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) {
+ if params == nil {
+ params = &PutBucketCorsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketCors", params, optFns, c.addOperationPutBucketCorsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketCorsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
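An editor-supplied sketch mirroring the browser XMLHttpRequest scenario described above, assuming a *s3.Client as in the earlier example; the bucket name and origin are placeholders.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putCorsExample allows browser GET and PUT requests from a single origin.
func putCorsExample(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				AllowedOrigins: []string{"http://www.example.com"},
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
				// Let browsers cache the pre-flight OPTIONS response for 50 minutes.
				MaxAgeSeconds: aws.Int32(3000),
			}},
		},
	})
	return err
}
```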
+type PutBucketCorsInput struct {
+
+ // Specifies the bucket impacted by the cors configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Describes the cross-origin access configuration for objects in an Amazon S3
+// bucket. For more information, see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide.
+ //
+ // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+ //
+ // This member is required.
+ CORSConfiguration *types.CORSConfiguration
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to [RFC 1864.]
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketCorsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketCors{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketCors"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketCors(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketCorsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketCorsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketCors",
+ }
+}
+
+// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketCorsInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketCorsRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketCorsBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutBucketCorsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketCorsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketCorsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketCorsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
new file mode 100644
index 000000000..87d25702c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
@@ -0,0 +1,407 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation configures default encryption and Amazon S3 Bucket Keys for an
+// existing bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// By default, all buckets have a default encryption configuration that uses
+// server-side encryption with Amazon S3 managed keys (SSE-S3).
+//
+// - General purpose buckets
+//
+// - You can optionally configure default encryption for a bucket by using
+// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or
+// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS).
+// If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys].
+// For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption] in the
+// Amazon S3 User Guide.
+//
+// - If you use PutBucketEncryption to set your [default bucket encryption] to SSE-KMS, you should verify
+// that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID
+// provided in PutBucketEncryption requests.
+//
+// - Directory buckets - You can optionally configure default encryption for a
+// bucket by using server-side encryption with Key Management Service (KMS) keys
+// (SSE-KMS).
+//
+// - We recommend that the bucket's default encryption uses the desired
+// encryption configuration and you don't override the bucket default encryption in
+// your CreateSession requests or PUT object requests. Then, new objects are
+// automatically encrypted with the desired encryption settings. For more
+// information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+//
+// - Your SSE-KMS configuration can only support 1 [customer managed key] per
+// directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 ) isn't supported.
+//
+// - S3 Bucket Keys are always enabled for GET and PUT operations in a directory
+// bucket and can’t be disabled. S3 Bucket Keys aren't supported when you copy
+// SSE-KMS encrypted objects from general purpose buckets to directory buckets,
+// from directory buckets to general purpose buckets, or between directory buckets,
+// through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a
+// copy request is made for a KMS-encrypted object.
+//
+// - When you specify a [KMS customer managed key] for encryption in your directory bucket, only use the
+// key ID or key ARN. The key alias format of the KMS key isn't supported.
+//
+// - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption] to
+// SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption
+// requests.
+//
+// If you're specifying a customer managed KMS key, we recommend using a fully
+// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the
+// key within the requester’s account. This behavior can result in data that's
+// encrypted with a KMS key that belongs to the requester, and not the bucket
+// owner.
+//
+// Also, this action requires Amazon Web Services Signature Version 4. For more
+// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)].
+//
+// Permissions
+//
+// - General purpose bucket permissions - The s3:PutEncryptionConfiguration
+// permission is required in a policy. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Operations] and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation, you
+// must have the s3express:PutEncryptionConfiguration permission in an IAM
+// identity-based policy instead of a bucket policy. Cross-account access to this
+// API operation isn't supported. This operation can only be performed by the
+// Amazon Web Services account that owns the resource. For more information about
+// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// To set a directory bucket default encryption with SSE-KMS, you must also have
+// the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based
+// policies and KMS key policies for the target KMS key.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following operations are related to PutBucketEncryption :
+//
+// [GetBucketEncryption]
+//
+// [DeleteBucketEncryption]
+//
+// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
+// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
+// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html
+// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html
+// [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+// [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) {
+ if params == nil {
+ params = &PutBucketEncryptionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketEncryption", params, optFns, c.addOperationPutBucketEncryptionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketEncryptionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
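An editor-supplied sketch of setting SSE-KMS default encryption with S3 Bucket Keys enabled, assuming a *s3.Client as in the earlier example. The bucket name and KMS key ARN are placeholders; per the guidance above, prefer a fully qualified key ARN so the key resolves in the bucket owner's account rather than the requester's.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putEncryptionExample sets a bucket's default encryption to SSE-KMS with a
// customer managed key and enables S3 Bucket Keys to reduce KMS call volume.
func putEncryptionExample(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm: types.ServerSideEncryptionAwsKms,
					// Placeholder ARN; Amazon S3 doesn't validate this for
					// general purpose buckets, so double-check it.
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/key-id"),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}
```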
+type PutBucketEncryptionInput struct {
+
+ // Specifies default encryption for a bucket using server-side encryption with
+ // different key options.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the default server-side-encryption configuration.
+ //
+ // This member is required.
+ ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the server-side encryption
+ // configuration.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // This functionality is not supported for directory buckets.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketEncryptionOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketEncryption{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketEncryption"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketEncryption(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketEncryptionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketEncryptionInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketEncryption",
+ }
+}
+
+// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketEncryptionInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketEncryptionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketEncryptionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketEncryptionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketEncryptionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketEncryptionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
new file mode 100644
index 000000000..c032b4039
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
@@ -0,0 +1,278 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can
+// have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
+//
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
+// without performance impact or operational overhead. S3 Intelligent-Tiering
+// delivers automatic cost savings in three low latency and high throughput access
+// tiers. To get the lowest storage cost on data that can be accessed in minutes to
+// hours, you can choose to activate additional archiving capabilities.
+//
+// The S3 Intelligent-Tiering storage class is the ideal storage class for data
+// with unknown, changing, or unpredictable access patterns, independent of object
+// size or retention period. If the size of an object is less than 128 KB, it is
+// not monitored and not eligible for auto-tiering. Smaller objects can be stored,
+// but they are always charged at the Frequent Access tier rates in the S3
+// Intelligent-Tiering storage class.
+//
+// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// Operations related to PutBucketIntelligentTieringConfiguration include:
+//
+// [DeleteBucketIntelligentTieringConfiguration]
+//
+// [GetBucketIntelligentTieringConfiguration]
+//
+// [ListBucketIntelligentTieringConfigurations]
+//
+// You only need S3 Intelligent-Tiering enabled on a bucket if you want to
+// automatically move objects stored in the S3 Intelligent-Tiering storage class to
+// the Archive Access or Deep Archive Access tier.
+//
+// PutBucketIntelligentTieringConfiguration has the following special errors:
+//
+// HTTP 400 Bad Request Error Code: InvalidArgument
+//
+// Cause: Invalid Argument
+//
+// HTTP 400 Bad Request Error Code: TooManyConfigurations
+//
+// Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket,
+// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission
+// to set the configuration on the bucket.
+//
+// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html
+// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html
+func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketIntelligentTieringConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketIntelligentTieringConfiguration", params, optFns, c.addOperationPutBucketIntelligentTieringConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketIntelligentTieringConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
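An editor-supplied sketch of an archiving configuration, assuming a *s3.Client as in the earlier example and the pointer-typed Days field of recent SDK releases. The bucket name, configuration ID, and the 90-day threshold are placeholders.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putTieringExample opts Intelligent-Tiering objects into the Archive Access
// tier after 90 consecutive days without access.
func putTieringExample(ctx context.Context, client *s3.Client) error {
	id := aws.String("archive-after-90d") // placeholder configuration ID
	_, err := client.PutBucketIntelligentTieringConfiguration(ctx, &s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		Id:     id,
		IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
			Id:     id,
			Status: types.IntelligentTieringStatusEnabled,
			Tierings: []types.Tiering{{
				AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
				Days:       aws.Int32(90),
			}},
		},
	})
	return err
}
```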
+type PutBucketIntelligentTieringConfigurationInput struct {
+
+// The name of the Amazon S3 bucket whose configuration you want to modify.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Container for S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ IntelligentTieringConfiguration *types.IntelligentTieringConfiguration
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketIntelligentTieringConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketIntelligentTieringConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketIntelligentTieringConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketIntelligentTieringConfiguration",
+ }
+}
+
+// getPutBucketIntelligentTieringConfigurationBucketMember returns a pointer to
+// string denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getPutBucketIntelligentTieringConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketIntelligentTieringConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketIntelligentTieringConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
new file mode 100644
index 000000000..773ba76e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go
@@ -0,0 +1,300 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// This implementation of the PUT action adds an inventory configuration
+// (identified by the inventory ID) to the bucket. You can have up to 1,000
+// inventory configurations per bucket.
+//
+// Amazon S3 inventory generates inventories of the objects in the bucket on a
+// daily or weekly basis, and the results are published to a flat file. The bucket
+// that is inventoried is called the source bucket, and the bucket where the
+// inventory flat file is stored is called the destination bucket. The destination
+// bucket must be in the same Amazon Web Services Region as the source bucket.
+//
+// When you configure an inventory for a source bucket, you specify the
+// destination bucket where you want the inventory to be stored, and whether to
+// generate the inventory daily or weekly. You can also configure what object
+// metadata to include and whether to inventory all object versions or only current
+// versions. For more information, see [Amazon S3 Inventory] in the Amazon S3 User Guide.
+//
+// You must create a bucket policy on the destination bucket to grant permissions
+// to Amazon S3 to write objects to the bucket in the defined location. For an
+// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis].
+//
+// Permissions To use this operation, you must have permission to perform the
+// s3:PutInventoryConfiguration action. The bucket owner has this permission by
+// default and can grant this permission to others.
+//
+// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report
+// that includes all object metadata fields available and to specify the
+// destination bucket to store the inventory. A user with read access to objects in
+// the destination bucket can also access all object metadata fields that are
+// available in the inventory report.
+//
+// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide.
+// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists]
+// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations] and [Identity and access management in Amazon S3]
+// in the Amazon S3 User Guide.
+//
+// PutBucketInventoryConfiguration has the following special errors:
+//
+// HTTP 400 Bad Request Error Code: InvalidArgument
+//
+// Cause: Invalid Argument
+//
+// HTTP 400 Bad Request Error Code: TooManyConfigurations
+//
+// Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+//
+// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket,
+// or you do not have the s3:PutInventoryConfiguration bucket permission to set
+// the configuration on the bucket.
+//
+// The following operations are related to PutBucketInventoryConfiguration :
+//
+// [GetBucketInventoryConfiguration]
+//
+// [DeleteBucketInventoryConfiguration]
+//
+// [ListBucketInventoryConfigurations]
+//
+// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9
+// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html
+// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html
+// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html
+// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html
+// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10
+// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents
+// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html
+func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketInventoryConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketInventoryConfiguration", params, optFns, c.addOperationPutBucketInventoryConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketInventoryConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
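+
+// A minimal usage sketch (not generated code): a weekly CSV inventory of
+// current object versions delivered to a destination bucket. The client, ctx,
+// and both bucket names are assumed or hypothetical.
+//
+//	_, err := client.PutBucketInventoryConfiguration(ctx, &s3.PutBucketInventoryConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-source-bucket"),
+//		Id:     aws.String("weekly-inventory"),
+//		InventoryConfiguration: &types.InventoryConfiguration{
+//			Id:                     aws.String("weekly-inventory"),
+//			IsEnabled:              aws.Bool(true),
+//			IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
+//			Schedule:               &types.InventorySchedule{Frequency: types.InventoryFrequencyWeekly},
+//			Destination: &types.InventoryDestination{
+//				S3BucketDestination: &types.InventoryS3BucketDestination{
+//					Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
+//					Format: types.InventoryFormatCsv,
+//				},
+//			},
+//		},
+//	})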
+
+type PutBucketInventoryConfigurationInput struct {
+
+ // The name of the bucket where the inventory configuration will be stored.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the inventory configuration.
+ //
+ // This member is required.
+ InventoryConfiguration *types.InventoryConfiguration
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketInventoryConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketInventoryConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketInventoryConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketInventoryConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketInventoryConfiguration",
+ }
+}
+
+// getPutBucketInventoryConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketInventoryConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketInventoryConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketInventoryConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketInventoryConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
new file mode 100644
index 000000000..567b80773
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go
@@ -0,0 +1,416 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. Keep in mind that this will overwrite an existing
+// lifecycle configuration, so if you want to retain any configuration details,
+// they must be included in the new lifecycle configuration. For information about
+// lifecycle configuration, see [Managing your storage lifecycle].
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, object size, or any
+// combination of these. Accordingly, this section describes the latest API. The
+// previous version of the API supported filtering based only on an object key name
+// prefix, which is still supported for backward compatibility for general purpose
+// buckets. For the related API description, see [PutBucketLifecycle].
+//
+// Rules You specify the lifecycle configuration in your request body. The
+// lifecycle configuration is specified as XML consisting of one or more rules. An
+// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not
+// adjustable.
+//
+// Lifecycle configurations for directory buckets only support expiring objects
+// and cancelling multipart uploads. Expiring versioned objects, transitions, and
+// tag filters are not supported.
+//
+// A lifecycle rule consists of the following:
+//
+// - A filter identifying a subset of objects to which the rule applies. The
+// filter can be based on a key name prefix, object tags, object size, or any
+// combination of these.
+//
+// - A status indicating whether the rule is in effect.
+//
+// - One or more lifecycle transition and expiration actions that you want
+// Amazon S3 to perform on the objects identified by the filter. If the state of
+// your bucket is versioning-enabled or versioning-suspended, you can have many
+// versions of the same object (one current version and zero or more noncurrent
+// versions). Amazon S3 provides predefined actions that you can specify for
+// current and noncurrent object versions.
+//
+// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements].
+//
+// - General purpose bucket permissions - By default, all Amazon S3 resources
+// are private, including buckets, objects, and related subresources (for example,
+// lifecycle configuration and website configuration). Only the resource owner
+// (that is, the Amazon Web Services account that created it) can access the
+// resource. The resource owner can optionally grant access permissions to others
+// by writing an access policy. For this operation, a user must have the
+// s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. An explicit deny also supersedes any
+//
+// other permissions. If you want to block users or accounts from removing or
+// deleting objects from your bucket, you must deny them permissions for the
+// following actions:
+//
+// - s3:DeleteObject
+//
+// - s3:DeleteObjectVersion
+//
+// - s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// - Directory bucket permissions - You must have the
+// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy
+// to use this operation. Cross-account access to this API operation isn't
+// supported. The resource owner can optionally grant access permissions to others
+// by creating a role or user for them as long as they are within the same account
+// as the owner and resource.
+//
+// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM] in
+//
+// the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+//
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// The following operations are related to PutBucketLifecycleConfiguration :
+//
+// [GetBucketLifecycleConfiguration]
+//
+// [DeleteBucketLifecycle]
+//
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html
+// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html
+// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketLifecycleConfiguration", params, optFns, c.addOperationPutBucketLifecycleConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketLifecycleConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
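+
+// A minimal usage sketch (not generated code): one rule expiring objects under
+// the logs/ prefix after 30 days. The client, ctx, and bucket name are assumed
+// or hypothetical; the struct-based LifecycleRuleFilter shown here matches
+// recent versions of the types package.
+//
+//	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
+//			Rules: []types.LifecycleRule{{
+//				ID:         aws.String("expire-old-logs"),
+//				Status:     types.ExpirationStatusEnabled,
+//				Filter:     &types.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//				Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
+//			}},
+//		},
+//	})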
+
+type PutBucketLifecycleConfigurationInput struct {
+
+ // The name of the bucket for which to set the configuration.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ ExpectedBucketOwner *string
+
+ // Container for lifecycle rules. You can add as many as 1,000 rules.
+ LifecycleConfiguration *types.BucketLifecycleConfiguration
+
+ // Indicates which default minimum object size behavior is applied to the
+ // lifecycle configuration.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ //
+ // - all_storage_classes_128K - Objects smaller than 128 KB will not transition
+ // to any storage class by default.
+ //
+ // - varies_by_storage_class - Objects smaller than 128 KB will transition to
+ // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default,
+ // all other storage classes will prevent transitions smaller than 128 KB.
+ //
+ // To customize the minimum object size for any transition you can add a filter
+ // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body
+ // of your transition rule. Custom filters always take precedence over the default
+ // transition behavior.
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketLifecycleConfigurationOutput struct {
+
+ // Indicates which default minimum object size behavior is applied to the
+ // lifecycle configuration.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ //
+ // - all_storage_classes_128K - Objects smaller than 128 KB will not transition
+ // to any storage class by default.
+ //
+ // - varies_by_storage_class - Objects smaller than 128 KB will transition to
+ // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default,
+ // all other storage classes will prevent transitions smaller than 128 KB.
+ //
+ // To customize the minimum object size for any transition you can add a filter
+ // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body
+ // of your transition rule. Custom filters always take precedence over the default
+ // transition behavior.
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLifecycleConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLifecycleConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketLifecycleConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketLifecycleConfiguration",
+ }
+}
+
+// getPutBucketLifecycleConfigurationRequestAlgorithmMember gets the request
+// checksum algorithm value provided as input.
+func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketLifecycleConfigurationInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketLifecycleConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketLifecycleConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketLifecycleConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketLifecycleConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketLifecycleConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
new file mode 100644
index 000000000..53342aca4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go
@@ -0,0 +1,337 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who can
+// view and modify the logging parameters. All logs are saved to buckets in the
+// same Amazon Web Services Region as the source bucket. To set the logging status
+// of a bucket, you must be the bucket owner.
+//
+// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the
+// Grantee request element to grant access to other people. The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// If the target bucket for log delivery uses the bucket owner enforced setting
+// for S3 Object Ownership, you can't use the Grantee request element to grant
+// access to others. Permissions can only be granted using policies. For more
+// information, see [Permissions for server access log delivery] in the Amazon S3 User Guide.
+//
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (by using request elements) in the following ways:
+//
+//   - By the person's ID:
+//
+//	<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//	xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+//
+// DisplayName is optional and ignored in the request.
+//
+//   - By Email address:
+//
+//	<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//	xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a
+// GETObjectAcl request, appears as the CanonicalUser.
+//
+//   - By URI:
+//
+//	<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//	xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// To enable logging, you use LoggingEnabled and its children request elements. To
+// disable logging, you use an empty BucketLoggingStatus request element:
+//
+//	<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
+//
+// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User
+// Guide.
+//
+// For more information about creating a bucket, see [CreateBucket]. For more information about
+// returning the logging status of a bucket, see [GetBucketLogging].
+//
+// The following operations are related to PutBucketLogging :
+//
+// [PutObject]
+//
+// [DeleteBucket]
+//
+// [CreateBucket]
+//
+// [GetBucketLogging]
+//
+// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html
+func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) {
+ if params == nil {
+ params = &PutBucketLoggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketLogging", params, optFns, c.addOperationPutBucketLoggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketLoggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
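+
+// A minimal usage sketch (not generated code): enabling server access logging
+// to a target bucket; sending an empty BucketLoggingStatus instead disables it.
+// The client, ctx, and bucket names are assumed or hypothetical.
+//
+//	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		BucketLoggingStatus: &types.BucketLoggingStatus{
+//			LoggingEnabled: &types.LoggingEnabled{
+//				TargetBucket: aws.String("amzn-s3-demo-logging-bucket"),
+//				TargetPrefix: aws.String("access-logs/"),
+//			},
+//		},
+//	})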
+
+type PutBucketLoggingInput struct {
+
+ // The name of the bucket for which to set the logging parameters.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for logging status information.
+ //
+ // This member is required.
+ BucketLoggingStatus *types.BucketLoggingStatus
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The MD5 hash of the PutBucketLogging request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketLoggingOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketLogging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLogging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketLogging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketLoggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketLoggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketLogging",
+ }
+}
+
+// getPutBucketLoggingRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketLoggingInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketLoggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketLoggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketLoggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketLoggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketLoggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
new file mode 100644
index 000000000..e386b1e9a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go
@@ -0,0 +1,269 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID) for
+// the bucket. You can have up to 1,000 metrics configurations per bucket. If
+// you're updating an existing metrics configuration, note that this is a full
+// replacement of the existing metrics configuration. If you don't include the
+// elements you want to keep, they are erased.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutMetricsConfiguration action. The bucket owner has this permission by
+// default. The bucket owner can grant this permission to others. For more
+// information about permissions, see [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch].
+//
+// The following operations are related to PutBucketMetricsConfiguration :
+//
+// [DeleteBucketMetricsConfiguration]
+//
+// [GetBucketMetricsConfiguration]
+//
+// [ListBucketMetricsConfigurations]
+//
+// PutBucketMetricsConfiguration has the following special error:
+//
+// - Error code: TooManyConfigurations
+//
+// - Description: You are attempting to create a new configuration but have
+// already reached the 1,000-configuration limit.
+//
+// - HTTP Status Code: HTTP 400 Bad Request
+//
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html
+// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html
+// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html
+// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketMetricsConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketMetricsConfiguration", params, optFns, c.addOperationPutBucketMetricsConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketMetricsConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
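+
+// A minimal usage sketch (not generated code): request metrics for the whole
+// bucket; omitting the optional Filter applies the configuration to all
+// objects. The client, ctx, and bucket name are assumed or hypothetical.
+//
+//	_, err := client.PutBucketMetricsConfiguration(ctx, &s3.PutBucketMetricsConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Id:     aws.String("EntireBucket"),
+//		MetricsConfiguration: &types.MetricsConfiguration{
+//			Id: aws.String("EntireBucket"),
+//		},
+//	})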
+
+type PutBucketMetricsConfigurationInput struct {
+
+ // The name of the bucket for which the metrics configuration is set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The ID used to identify the metrics configuration. The ID has a 64 character
+ // limit and can only contain letters, numbers, periods, dashes, and underscores.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the metrics configuration.
+ //
+ // This member is required.
+ MetricsConfiguration *types.MetricsConfiguration
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketMetricsConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketMetricsConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketMetricsConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketMetricsConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketMetricsConfiguration",
+ }
+}
+
+// getPutBucketMetricsConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the input
+// has a modeled bucket name.
+func getPutBucketMetricsConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketMetricsConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketMetricsConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketMetricsConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
new file mode 100644
index 000000000..9100fb197
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go
@@ -0,0 +1,283 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see [Configuring Event Notifications].
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an event
+// notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration .
+//
+// This action replaces the existing notification configuration with the
+// configuration you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon Simple
+// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS)
+// destination exists, and that the bucket owner has permission to publish to it by
+// sending a test notification. In the case of Lambda destinations, Amazon S3
+// verifies that the Lambda function permissions grant Amazon S3 permission to
+// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events].
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// For more information about the number of event notification configurations that
+// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with the required s3:PutBucketNotification
+// permission.
+//
+// The PUT notification is an atomic operation. For example, suppose your
+// notification configuration includes SNS topic, SQS queue, and Lambda function
+// configurations. When you send a PUT request with this configuration, Amazon S3
+// sends test messages to your SNS topic. If the message fails, the entire PUT
+// action will fail, and Amazon S3 will not add the configuration to your bucket.
+//
+// If the configuration in the request body includes only one TopicConfiguration
+// specifying only the s3:ReducedRedundancyLostObject event type, the response
+// will also include the x-amz-sns-test-message-id header containing the message
+// ID of the test notification sent to the topic.
+//
+// The following action is related to PutBucketNotificationConfiguration :
+//
+// [GetBucketNotificationConfiguration]
+//
+// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3
+// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html
+// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) {
+ if params == nil {
+ params = &PutBucketNotificationConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketNotificationConfiguration", params, optFns, c.addOperationPutBucketNotificationConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketNotificationConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketNotificationConfigurationInput struct {
+
+ // The name of the bucket.
+ //
+ // This member is required.
+ Bucket *string
+
+ // A container for specifying the notification configuration of the bucket. If
+ // this element is empty, notifications are turned off for the bucket.
+ //
+ // This member is required.
+ NotificationConfiguration *types.NotificationConfiguration
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or
+ // false value.
+ SkipDestinationValidation *bool
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketNotificationConfigurationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketNotificationConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketNotificationConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketNotificationConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketNotificationConfiguration",
+ }
+}
+
+// getPutBucketNotificationConfigurationBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getPutBucketNotificationConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketNotificationConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketNotificationConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketNotificationConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
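
Reviewer note: the doc comment above explains that this operation replaces a bucket's entire notification configuration in one atomic PUT. A minimal sketch of such a call follows; the queue ARN is assumed to exist and have a policy allowing Amazon S3 to publish to it, and all identifiers are hypothetical. The event type is written as a cast string literal to stay version-agnostic.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableQueueNotifications subscribes an SQS queue to all object-created
// events on the bucket, replacing any existing notification configuration.
func enableQueueNotifications(ctx context.Context, client *s3.Client, bucket, queueARN string) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{{
				QueueArn: aws.String(queueARN),
				Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
			}},
		},
	})
	return err
}

Passing an empty *types.NotificationConfiguration{} instead would turn notifications off, matching the "empty NotificationConfiguration element" behavior described above.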
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
new file mode 100644
index 000000000..96f27ccd0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go
@@ -0,0 +1,269 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this
+// operation, you must have the s3:PutBucketOwnershipControls permission. For more
+// information about Amazon S3 permissions, see [Specifying permissions in a policy].
+//
+// For information about Amazon S3 Object Ownership, see [Using object ownership].
+//
+// The following operations are related to PutBucketOwnershipControls :
+//
+// # GetBucketOwnershipControls
+//
+// # DeleteBucketOwnershipControls
+//
+// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html
+// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html
+func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) {
+ if params == nil {
+ params = &PutBucketOwnershipControlsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketOwnershipControls", params, optFns, c.addOperationPutBucketOwnershipControlsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketOwnershipControlsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketOwnershipControlsInput struct {
+
+ // The name of the Amazon S3 bucket whose OwnershipControls you want to set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or
+ // ObjectWriter) that you want to apply to this Amazon S3 bucket.
+ //
+ // This member is required.
+ OwnershipControls *types.OwnershipControls
+
+ // The MD5 hash of the OwnershipControls request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketOwnershipControlsOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketOwnershipControls{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketOwnershipControls"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketOwnershipControls(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketOwnershipControlsInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketOwnershipControls",
+ }
+}
+
+func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: nil,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketOwnershipControlsBucketMember returns a pointer to string
+// denoting a provided bucket member value and a boolean indicating if the
+// input has a modeled bucket name.
+func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketOwnershipControlsInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketOwnershipControlsUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketOwnershipControlsBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
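
Reviewer note: to illustrate the OwnershipControls payload the doc comment above refers to, here is a hedged sketch applying the BucketOwnerEnforced rule (one of the three values the input struct documents); the bucket name is a placeholder.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enforceBucketOwner applies the BucketOwnerEnforced rule, which disables
// ACLs and makes the bucket owner the owner of every object in the bucket.
func enforceBucketOwner(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{
				{ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced},
			},
		},
	})
	return err
}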
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
new file mode 100644
index 000000000..1764f4928
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go
@@ -0,0 +1,380 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions If you are using an identity other than the root user of the Amazon
+// Web Services account that owns the bucket, the calling identity must both have
+// the PutBucketPolicy permissions on the specified bucket and belong to the
+// bucket owner's account in order to use this operation.
+//
+// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access
+// Denied error. If you have the correct permissions, but you're not using an
+// identity that belongs to the bucket owner's account, Amazon S3 returns a 405
+// Method Not Allowed error.
+//
+// To ensure that bucket owners don't inadvertently lock themselves out of their
+// own buckets, the root principal in a bucket owner's Amazon Web Services account
+// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API
+// actions, even if their bucket policy explicitly denies the root principal's
+// access. Bucket owner root principals can only be blocked from performing these
+// API actions by VPC endpoint policies and Amazon Web Services Organizations
+// policies.
+//
+// - General purpose bucket permissions - The s3:PutBucketPolicy permission is
+// required in a policy. For more information about general purpose buckets bucket
+// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation, you
+// must have the s3express:PutBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web Services
+// account that owns the resource. For more information about directory bucket
+// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide.
+//
+// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples]
+// in the Amazon S3 User Guide.
+//
+// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region-code.amazonaws.com .
+//
+// The following operations are related to PutBucketPolicy :
+//
+// [CreateBucket]
+//
+// [DeleteBucket]
+//
+// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html
+func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) {
+ if params == nil {
+ params = &PutBucketPolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketPolicy", params, optFns, c.addOperationPutBucketPolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketPolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketPolicyInput struct {
+
+ // The name of the bucket.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use path-style requests in the format
+ // https://s3express-control.region-code.amazonaws.com/bucket-name .
+ // Virtual-hosted-style requests aren't supported. Directory bucket names must be
+ // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must
+ // also follow the format bucket-base-name--zone-id--x-s3 (for example,
+ // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming
+	// restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The bucket policy as a JSON document.
+ //
+ // For directory buckets, the only IAM action supported in the bucket policy is
+ // s3express:CreateSession .
+ //
+ // This member is required.
+ Policy *string
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
+ // fails the request with the HTTP status code 400 Bad Request .
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the
+ // supported algorithm from the following list:
+ //
+ // - CRC32
+ //
+ // - CRC32C
+ //
+ // - CRC64NVME
+ //
+ // - SHA1
+ //
+ // - SHA256
+ //
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through
+ // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest
+ // error.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // Set this parameter to true to confirm that you want to remove your permissions
+ // to change this bucket policy in the future.
+ //
+ // This functionality is not supported for directory buckets.
+ ConfirmRemoveSelfBucketAccess *bool
+
+ // The MD5 hash of the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // This functionality is not supported for directory buckets.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation. If
+ // you specify this header, the request fails with the HTTP status code 501 Not
+ // Implemented .
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketPolicyOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketPolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketPolicy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketPolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketPolicyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketPolicyInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketPolicy",
+ }
+}
+
+// getPutBucketPolicyRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketPolicyInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketPolicyBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketPolicyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketPolicyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketPolicyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketPolicyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
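
Reviewer note: the Policy member above is an ordinary JSON document passed as a string. A sketch of attaching a simple read-only policy follows; the principal ARN, bucket name, and statement are illustrative only, not anything this diff prescribes.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putReadOnlyPolicy grants s3:GetObject on all objects in the bucket to a
// single (illustrative) IAM principal.
func putReadOnlyPolicy(ctx context.Context, client *s3.Client, bucket, principalARN string) error {
	policy := fmt.Sprintf(`{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": {"AWS": %q},
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::%s/*"
  }]
}`, principalARN, bucket)

	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String(bucket),
		Policy: aws.String(policy),
	})
	return err
}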
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
new file mode 100644
index 000000000..1b11ef399
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go
@@ -0,0 +1,348 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Creates a replication configuration or replaces an existing one. For more
+// information, see [Replication]in the Amazon S3 User Guide.
+//
+// Specify the replication configuration in the request body. In the replication
+// configuration, you provide the name of the destination bucket or buckets where
+// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume
+// to replicate objects on your behalf, and other relevant information. You can
+// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion]
+// aws:RequestedRegion condition key.
+//
+// A replication configuration must include at least one rule, and can contain a
+// maximum of 1,000. Each rule identifies a subset of objects to replicate by
+// filtering the objects in the source bucket. To choose additional subsets of
+// objects to replicate, add a rule for each subset.
+//
+// To specify a subset of the objects in the source bucket to apply a replication
+// rule to, add the Filter element as a child of the Rule element. You can filter
+// objects based on an object key prefix, one or more object tags, or both. When
+// you add the Filter element in the configuration, you must also add the following
+// elements: DeleteMarkerReplication , Status , and Priority .
+//
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see [Backward Compatibility].
+//
+// For information about enabling versioning on a bucket, see [Using Versioning].
+//
+// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't
+// replicate objects that are stored at rest using server-side encryption with KMS
+// keys. To replicate Amazon Web Services KMS-encrypted objects, add the following:
+// SourceSelectionCriteria , SseKmsEncryptedObjects , Status ,
+// EncryptionConfiguration , and ReplicaKmsKeyID . For information about
+// replication configuration, see [Replicating Objects Created with SSE Using KMS keys].
+//
+// For information on PutBucketReplication errors, see [List of replication-related error codes]
+//
+// Permissions To create a PutBucketReplication request, you must have
+// s3:PutReplicationConfiguration permissions for the bucket.
+//
+// By default, a resource owner, in this case the Amazon Web Services account that
+// created the bucket, can perform this operation. The resource owner can also
+// grant others permissions to perform the operation. For more information about
+// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// To perform this operation, the user or role performing the action must have the [iam:PassRole]
+// permission.
+//
+// The following operations are related to PutBucketReplication :
+//
+// [GetBucketReplication]
+//
+// [DeleteBucketReplication]
+//
+// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html
+// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html
+// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion
+// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html
+// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html
+// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList
+// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
+// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) {
+ if params == nil {
+ params = &PutBucketReplicationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketReplication", params, optFns, c.addOperationPutBucketReplicationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketReplicationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketReplicationInput struct {
+
+	// The name of the bucket.
+ //
+ // This member is required.
+ Bucket *string
+
+ // A container for replication rules. You can add up to 1,000 rules. The maximum
+ // size of a replication configuration is 2 MB.
+ //
+ // This member is required.
+ ReplicationConfiguration *types.ReplicationConfiguration
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // A token to allow Object Lock to be enabled for an existing bucket.
+ Token *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketReplicationOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketReplication{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketReplication"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketReplication(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketReplicationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketReplicationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketReplication",
+ }
+}
+
+// getPutBucketReplicationRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketReplicationInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketReplicationBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketReplicationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketReplicationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketReplicationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketReplicationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
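
Reviewer note: per the doc comment above, a rule that uses Filter must also set DeleteMarkerReplication, Status, and Priority. A minimal hypothetical sketch follows; the role and destination bucket ARNs are placeholders, both buckets are assumed to have versioning enabled, and the struct-shaped ReplicationRuleFilter assumes the flattened union types used by this SDK version.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// replicateLogsPrefix replicates objects under "logs/" to a destination
// bucket. roleARN is the IAM role Amazon S3 assumes to copy objects.
func replicateLogsPrefix(ctx context.Context, client *s3.Client, srcBucket, dstBucketARN, roleARN string) error {
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String(srcBucket),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Role: aws.String(roleARN),
			Rules: []types.ReplicationRule{{
				Status:   types.ReplicationRuleStatusEnabled,
				Priority: aws.Int32(1),
				Filter:   &types.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &types.DeleteMarkerReplication{
					Status: types.DeleteMarkerReplicationStatusDisabled,
				},
				Destination: &types.Destination{Bucket: aws.String(dstBucketARN)},
			}},
		},
	})
	return err
}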
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
new file mode 100644
index 000000000..a6d19c609
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go
@@ -0,0 +1,295 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download will
+// be charged for the download. For more information, see [Requester Pays Buckets].
+//
+// The following operations are related to PutBucketRequestPayment :
+//
+// [CreateBucket]
+//
+// [GetBucketRequestPayment]
+//
+// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html
+// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) {
+ if params == nil {
+ params = &PutBucketRequestPaymentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketRequestPayment", params, optFns, c.addOperationPutBucketRequestPaymentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketRequestPaymentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutBucketRequestPaymentInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for Payer.
+ //
+ // This member is required.
+ RequestPaymentConfiguration *types.RequestPaymentConfiguration
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketRequestPaymentOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketRequestPayment{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketRequestPayment"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketRequestPayment(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketRequestPaymentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketRequestPaymentInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketRequestPayment",
+ }
+}
+
+// getPutBucketRequestPaymentRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketRequestPaymentInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketRequestPaymentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketRequestPaymentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketRequestPaymentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketRequestPaymentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketRequestPaymentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
new file mode 100644
index 000000000..db5e62dfd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go
@@ -0,0 +1,327 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the tags for a bucket.
+//
+// Use tags to organize your Amazon Web Services bill to reflect your own cost
+// structure. To do this, sign up to get your Amazon Web Services account bill with
+// tag key values included. Then, to see the cost of combined resources, organize
+// your billing information according to resources with the same tag key values.
+// For example, you can tag several resources with a specific application name, and
+// then organize your billing information to see the total cost of that application
+// across several services. For more information, see [Cost Allocation and Tagging] and [Using Cost Allocation in Amazon S3 Bucket Tags].
+//
+// When this operation sets the tags for a bucket, it will overwrite any current
+// tags the bucket already has. You cannot use this operation to add tags to an
+// existing list of tags.
+//
+// To use this operation, you must have permissions to perform the
+// s3:PutBucketTagging action. The bucket owner has this permission by default and
+// can grant this permission to others. For more information about permissions, see
+// [Permissions Related to Bucket Subresource Operations] and [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// PutBucketTagging has the following special errors. For more Amazon S3
+// errors, see [Error Responses].
+//
+// - InvalidTag - The tag provided was not a valid tag. This error can occur if
+// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags].
+//
+// - MalformedXML - The XML provided does not match the schema.
+//
+// - OperationAborted - A conflicting conditional action is currently in progress
+// against this resource. Please try again.
+//
+// - InternalError - The service was unable to apply the provided tag to the
+// bucket.
+//
+// The following operations are related to PutBucketTagging :
+//
+// [GetBucketTagging]
+//
+// [DeleteBucketTagging]
+//
+// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html
+// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html
+// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) {
+ if params == nil {
+ params = &PutBucketTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketTagging", params, optFns, c.addOperationPutBucketTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
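+
+// Example (editor's illustrative sketch, not generated code): a minimal
+// PutBucketTagging call, assuming a configured *Client named c, a
+// context.Context ctx, and placeholder bucket/tag values; aws.String is the
+// pointer helper from github.com/aws/aws-sdk-go-v2/aws. Note that this call
+// replaces the bucket's entire tag set.
+//
+//	_, err := c.PutBucketTagging(ctx, &PutBucketTaggingInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Tagging: &types.Tagging{
+//			TagSet: []types.Tag{
+//				{Key: aws.String("project"), Value: aws.String("backup")},
+//			},
+//		},
+//	})
+//	if err != nil {
+//		// handle error
+//	}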
+
+type PutBucketTaggingInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the TagSet and Tag elements.
+ //
+ // This member is required.
+ Tagging *types.Tagging
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketTaggingOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketTagging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketTaggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketTagging",
+ }
+}
+
+// getPutBucketTaggingRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketTaggingInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketTaggingRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketTaggingBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getPutBucketTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
new file mode 100644
index 000000000..aa30d5fad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go
@@ -0,0 +1,331 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// When you enable versioning on a bucket for the first time, it might take a
+// short amount of time for the change to be fully propagated. While this change is
+// propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for
+// requests to objects created or updated after enabling versioning. We recommend
+// that you wait for 15 minutes after enabling versioning before issuing write
+// operations ( PUT or DELETE ) on objects in the bucket.
+//
+// Sets the versioning state of an existing bucket.
+//
+// You can set the versioning state with one of the following values:
+//
+// Enabled—Enables versioning for the objects in the bucket. All objects added to
+// the bucket receive a unique version ID.
+//
+// Suspended—Disables versioning for the objects in the bucket. All objects added
+// to the bucket receive the version ID null.
+//
+// If the versioning state has never been set on a bucket, it has no versioning
+// state; a [GetBucketVersioning] request does not return a versioning state value.
+//
+// In order to enable MFA Delete, you must be the bucket owner. If you are the
+// bucket owner and want to enable MFA Delete in the bucket versioning
+// configuration, you must include the x-amz-mfa request header and the Status and
+// the MfaDelete request elements in a request to set the versioning state of the
+// bucket.
+//
+// If you have an object expiration lifecycle configuration in your non-versioned
+// bucket and you want to maintain the same permanent delete behavior when you
+// enable versioning, you must add a noncurrent expiration policy. The noncurrent
+// expiration lifecycle configuration will manage the deletes of the noncurrent
+// object versions in the version-enabled bucket. (A version-enabled bucket
+// maintains one current and zero or more noncurrent object versions.) For more
+// information, see [Lifecycle and Versioning].
+//
+// The following operations are related to PutBucketVersioning :
+//
+// [CreateBucket]
+//
+// [DeleteBucket]
+//
+// [GetBucketVersioning]
+//
+// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html
+// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html
+// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config
+// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
+func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) {
+ if params == nil {
+ params = &PutBucketVersioningInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketVersioning", params, optFns, c.addOperationPutBucketVersioningMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketVersioningOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
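+
+// Example (editor's illustrative sketch, not generated code): enabling
+// versioning, assuming a configured *Client c and context ctx. Suspending
+// works the same way with types.BucketVersioningStatusSuspended; MFA delete
+// additionally requires the MFA input field and the MfaDelete element
+// described above.
+//
+//	_, err := c.PutBucketVersioning(ctx, &PutBucketVersioningInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		VersioningConfiguration: &types.VersioningConfiguration{
+//			Status: types.BucketVersioningStatusEnabled,
+//		},
+//	})
+//	if err != nil {
+//		// handle error
+//	}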
+
+type PutBucketVersioningInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for setting the versioning state.
+ //
+ // This member is required.
+ VersioningConfiguration *types.VersioningConfiguration
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+// The Base64 encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The concatenation of the authentication device's serial number, a space, and
+ // the value that is displayed on your authentication device.
+ MFA *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketVersioningOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketVersioning{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketVersioning"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketVersioning(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketVersioningUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketVersioningInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketVersioning",
+ }
+}
+
+// getPutBucketVersioningRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketVersioningInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketVersioningBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutBucketVersioningBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketVersioningInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketVersioningUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketVersioningBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
new file mode 100644
index 000000000..169b24bcc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go
@@ -0,0 +1,350 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the configuration of the website that is specified in the website
+// subresource. To configure a bucket as a website, you can add this subresource on
+// the bucket with website configuration information such as the file name of the
+// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3].
+//
+// This PUT action requires the S3:PutBucketWebsite permission. By default, only
+// the bucket owner can configure the website attached to a bucket; however, bucket
+// owners can allow other users to set the website configuration by writing a
+// bucket policy that grants them the S3:PutBucketWebsite permission.
+//
+// To redirect all website requests sent to the bucket's website endpoint, you add
+// a website configuration with the following elements. Because all requests are
+// sent to another website, you don't need to provide an index document name
+// for the bucket.
+//
+// - WebsiteConfiguration
+//
+// - RedirectAllRequestsTo
+//
+// - HostName
+//
+// - Protocol
+//
+// If you want granular control over redirects, you can use the following elements
+// to add routing rules that describe conditions for redirecting requests and
+// information about the redirect destination. In this case, the website
+// configuration must provide an index document for the bucket, because some
+// requests might not be redirected.
+//
+// - WebsiteConfiguration
+//
+// - IndexDocument
+//
+// - Suffix
+//
+// - ErrorDocument
+//
+// - Key
+//
+// - RoutingRules
+//
+// - RoutingRule
+//
+// - Condition
+//
+// - HttpErrorCodeReturnedEquals
+//
+// - KeyPrefixEquals
+//
+// - Redirect
+//
+// - Protocol
+//
+// - HostName
+//
+// - ReplaceKeyPrefixWith
+//
+// - ReplaceKeyWith
+//
+// - HttpRedirectCode
+//
+// Amazon S3 has a limitation of 50 routing rules per website configuration. If
+// you require more than 50 routing rules, you can use object redirect. For more
+// information, see [Configuring an Object Redirect] in the Amazon S3 User Guide.
+//
+// The maximum request length is limited to 128 KB.
+//
+// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) {
+ if params == nil {
+ params = &PutBucketWebsiteInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutBucketWebsite", params, optFns, c.addOperationPutBucketWebsiteMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutBucketWebsiteOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
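+
+// Example (editor's illustrative sketch, not generated code): the granular
+// form described above, with an index and an error document, assuming a
+// configured *Client c and context ctx. Redirect-only hosting would instead
+// set types.WebsiteConfiguration.RedirectAllRequestsTo.
+//
+//	_, err := c.PutBucketWebsite(ctx, &PutBucketWebsiteInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		WebsiteConfiguration: &types.WebsiteConfiguration{
+//			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
+//			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
+//		},
+//	})
+//	if err != nil {
+//		// handle error
+//	}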
+
+type PutBucketWebsiteInput struct {
+
+ // The bucket name.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Container for the request.
+ //
+ // This member is required.
+ WebsiteConfiguration *types.WebsiteConfiguration
+
+ // Indicates the algorithm used to create the checksum for the request when you
+ // use the SDK. This header will not provide any additional functionality if you
+ // don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, see [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutBucketWebsiteOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutBucketWebsite{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketWebsite"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutBucketWebsite(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutBucketWebsiteUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutBucketWebsiteInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutBucketWebsite",
+ }
+}
+
+// getPutBucketWebsiteRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutBucketWebsiteInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutBucketWebsiteBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getPutBucketWebsiteBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutBucketWebsiteInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutBucketWebsiteUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutBucketWebsiteBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
new file mode 100644
index 000000000..cc0fb9e5f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go
@@ -0,0 +1,1037 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "time"
+)
+
+// Adds an object to a bucket.
+//
+// - Amazon S3 never adds partial objects; if you receive a success response,
+// Amazon S3 added the entire object to the bucket. You cannot use PutObject to
+// only update a single piece of metadata for an existing object. You must put the
+// entire object with updated metadata if you want to update some values.
+//
+// - If your bucket uses the bucket owner enforced setting for Object Ownership,
+// ACLs are disabled and no longer affect permissions. All objects written to the
+// bucket by any account will be owned by the bucket owner.
+//
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support
+// virtual-hosted-style requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Amazon S3 is a distributed system. If it receives multiple write requests for
+// the same object simultaneously, it overwrites all but the last object written.
+// However, Amazon S3 provides features that can modify this behavior:
+//
+// - S3 Object Lock - To prevent objects from being deleted or overwritten, you
+// can use [Amazon S3 Object Lock] in the Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets.
+//
+// - If-None-Match - Uploads the object only if the object key name does not
+// already exist in the specified bucket. Otherwise, Amazon S3 returns a 412
+// Precondition Failed error. If a conflicting operation occurs during the
+// upload, S3 returns a 409 ConditionalRequestConflict response. On a 409
+// failure, retry the upload.
+//
+// Expects the * character (asterisk).
+//
+// For more information, see [Add preconditions to S3 operations with conditional requests] in the Amazon S3 User Guide or [RFC 7232].
+//
+// This functionality is not supported for S3 on Outposts.
+//
+// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3
+// receives multiple write requests for the same object simultaneously, it stores
+// all versions of the objects. For each write request that is made to the same
+// object, Amazon S3 automatically generates a unique version ID of that object
+// being stored in Amazon S3. You can retrieve, replace, or delete any version of
+// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets] in the Amazon S3 User
+// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning].
+//
+// This functionality is not supported for directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your PutObject request includes specific headers.
+//
+// - s3:PutObject - To successfully complete the PutObject request, you must
+// always have the s3:PutObject permission on a bucket to add an object to it.
+//
+// - s3:PutObjectAcl - To successfully change the object's ACL with your
+// PutObject request, you must have the s3:PutObjectAcl permission.
+//
+// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject
+// request, you must have the s3:PutObjectTagging permission.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. The Amazon Web Services CLI and SDKs create and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession].
+//
+// If the object is encrypted with SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+// Data integrity with Content-MD5
+//
+// - General purpose bucket - To ensure that data is not corrupted traversing
+// the network, use the Content-MD5 header. When you use this header, Amazon S3
+// checks the object against the provided MD5 value and, if they do not match,
+// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5
+// digest, you can calculate the MD5 while putting the object to Amazon S3 and
+// compare the returned ETag to the calculated MD5 value.
+//
+// - Directory bucket - This functionality is not supported for directory
+// buckets.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// For more information about related Amazon S3 APIs, see the following:
+//
+// [CopyObject]
+//
+// [DeleteObject]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
+// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
+// [Add preconditions to S3 operations with conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [RFC 7232]: https://datatracker.ietf.org/doc/rfc7232/
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
+func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) {
+ if params == nil {
+ params = &PutObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObject", params, optFns, c.addOperationPutObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
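+
+// Example (editor's illustrative sketch, not generated code): a minimal
+// upload, assuming a configured *Client c, a context ctx, and
+// strings.NewReader from the standard library as the io.Reader body.
+// ChecksumAlgorithm is optional here; when unset, the SDK's default request
+// checksum behavior applies as described on the input fields below.
+//
+//	out, err := c.PutObject(ctx, &PutObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("backups/snapshot.tar.gz"),
+//		Body:   strings.NewReader("example object payload"),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = out.ETag // entity tag of the newly stored object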
+
+type PutObjectInput struct {
+
+ // The bucket name to which the PUT action was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+// access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+// S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the PUT action was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon
+ // S3 User Guide.
+ //
+ // When adding a new object, you can use headers to grant ACL-based permissions to
+ // individual Amazon Web Services accounts or to predefined groups defined by
+ // Amazon S3. These permissions are then added to the ACL on the object. By
+ // default, all objects are private. Only the owner has full access control. For
+// more information, see [Access Control List (ACL) Overview] and [Managing ACLs Using the REST API] in the Amazon S3 User Guide.
+ //
+ // If the bucket that you're uploading objects to uses the bucket owner enforced
+ // setting for S3 Object Ownership, ACLs are disabled and no longer affect
+ // permissions. Buckets that use this setting only accept PUT requests that don't
+ // specify an ACL or PUT requests that specify bucket owner full control ACLs, such
+ // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL
+ // expressed in the XML format. PUT requests that contain other ACLs (for example,
+ // custom grants to certain Amazon Web Services accounts) fail and return a 400
+ // error with the error code AccessControlListNotSupported . For more information,
+// see [Controlling ownership of objects and disabling ACLs] in the Amazon S3 User Guide.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ //
+ // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ ACL types.ObjectCannedACL
+
+ // Object data.
+ Body io.Reader
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
+ // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // General purpose buckets - Setting this header to true causes Amazon S3 to use
+ // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this
+ // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key.
+ //
+ // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT
+ // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't
+ // supported, when you copy SSE-KMS encrypted objects from general purpose buckets
+ // to directory buckets, from directory buckets to general purpose buckets, or
+ // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a
+ // call to KMS every time a copy request is made for a KMS-encrypted object.
+ //
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
+ BucketKeyEnabled *bool
+
+ // Can be used to specify caching behavior along the request/reply chain. For more
+ // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9].
+ //
+ // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+ CacheControl *string
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
+ // fails the request with the HTTP status code 400 Bad Request .
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the
+ // supported algorithm from the following list:
+ //
+ // - CRC32
+ //
+ // - CRC32C
+ //
+ // - CRC64NVME
+ //
+ // - SHA1
+ //
+ // - SHA256
+ //
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through
+ // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest
+ // error.
+ //
+ // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any
+ // request to upload an object with a retention period configured using Amazon S3
+// Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket] in the Amazon S3 User Guide.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum
+ // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // Size of the body in bytes. This parameter is useful when the size of the body
+ // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length
+ ContentLength *int64
+
+ // The Base64 encoded 128-bit MD5 digest of the message (without the headers)
+ // according to RFC 1864. This header can be used as a message integrity check to
+ // verify that the data is the same data that was originally sent. Although it is
+ // optional, we recommend using the Content-MD5 mechanism as an end-to-end
+ // integrity check. For more information about REST request authentication, see [REST Authentication].
+ //
+ // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any
+ // request to upload an object with a retention period configured using Amazon S3
+ // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object
+ ContentMD5 *string
+
+ // A standard MIME type describing the format of the contents. For more
+ // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type
+ ContentType *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The date and time at which the object is no longer cacheable. For more
+ // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3].
+ //
+ // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3
+ Expires *time.Time
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to read the object data and its metadata.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the object ACL.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to write the ACL for the applicable object.
+ //
+ // - This functionality is not supported for directory buckets.
+ //
+ // - This functionality is not supported for Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // Uploads the object only if the ETag (entity tag) value provided during the
+ // WRITE operation matches the ETag of the object in S3. If the ETag values do not
+ // match, the operation returns a 412 Precondition Failed error.
+ //
+ // If a conflicting operation occurs during the upload, S3 returns a 409
+ // ConditionalRequestConflict response. On a 409 failure, you should fetch the
+ // object's ETag and retry the upload.
+ //
+ // Expects the ETag value as a string.
+ //
+ // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3
+ // User Guide.
+ //
+ // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+ // Uploads the object only if the object key name does not already exist in the
+ // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error.
+ //
+ // If a conflicting operation occurs during the upload, S3 returns a 409
+ // ConditionalRequestConflict response. On a 409 failure, you should retry the
+ // upload. (A conditional-write sketch follows this struct definition.)
+ //
+ // Expects the '*' (asterisk) character.
+ //
+ // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3
+ // User Guide.
+ //
+ // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfNoneMatch *string
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Specifies whether a legal hold will be applied to this object. For more
+ // information about S3 Object Lock, see [Object Lock] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // The Object Lock mode that you want to apply to this object.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when you want this object's Object Lock to expire. Must be
+ // formatted as a timestamp parameter.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256 ).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // Specifies the Amazon Web Services KMS Encryption Context as an additional
+ // encryption context to use for object encryption. The value of this header is a
+ // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption
+ // context as key-value pairs. This value is stored as object metadata and
+ // automatically gets passed on to Amazon Web Services KMS for future GetObject
+ // operations on this object.
+ //
+ // General purpose buckets - This value must be explicitly added during CopyObject
+ // operations if you want an additional encryption context for your object. For
+ // more information, see [Encryption context] in the Amazon S3 User Guide.
+ //
+ // Directory buckets - You can optionally provide an explicit encryption context
+ // value. The value must match the default encryption context - the bucket Amazon
+ // Resource Name (ARN). An additional encryption context value is not supported.
+ //
+ // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context
+ SSEKMSEncryptionContext *string
+
+ // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object
+ // encryption. If the KMS key doesn't exist in the same account that's issuing the
+ // command, you must use the full Key ARN not the Key ID.
+ //
+ // General purpose buckets - If you specify x-amz-server-side-encryption with
+ // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key
+ // Alias) of the KMS key to use. If you specify
+ // x-amz-server-side-encryption:aws:kms or
+ // x-amz-server-side-encryption:aws:kms:dsse , but do not provide
+ // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
+ // Services managed key ( aws/s3 ) to protect the data.
+ //
+ // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify
+ // the x-amz-server-side-encryption header to aws:kms . Then, the
+ // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's
+ // default KMS customer managed key ID. If you want to explicitly set the
+ // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's
+ // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS
+ // configuration can only support 1 [customer managed key] per directory bucket's
+ // lifetime. The [Amazon Web Services managed key] ( aws/s3 ) isn't supported.
+ //
+ // Incorrect key specification results in an HTTP 400 Bad Request error.
+ //
+ // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm that was used when you store this object
+ // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
+ //
+ // - General purpose buckets - You have four mutually exclusive options to
+ // protect data using server-side encryption in Amazon S3, depending on how you
+ // choose to manage the encryption keys. Specifically, the encryption key options
+ // are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
+ // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
+ // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You
+ // can optionally tell Amazon S3 to encrypt data at rest by using server-side
+ // encryption with other key options. For more information, see [Using Server-Side Encryption] in the Amazon S3
+ // User Guide.
+ //
+ // - Directory buckets - For directory buckets, there are only two supported
+ // options for server-side encryption: server-side encryption with Amazon S3
+ // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+ // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses
+ // the desired encryption configuration and you don't override the bucket default
+ // encryption in your CreateSession requests or PUT object requests. Then, new
+ // objects are automatically encrypted with the desired encryption settings. For
+ // more information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide. For more information about
+ // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
+ //
+ // In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, the
+ // encryption request headers must match the encryption settings that are specified
+ // in the CreateSession request. You can't override the values of the encryption
+ // settings ( x-amz-server-side-encryption ,
+ // x-amz-server-side-encryption-aws-kms-key-id ,
+ // x-amz-server-side-encryption-context , and
+ // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the
+ // CreateSession request. You don't need to explicitly specify these encryption
+ // settings values in Zonal endpoint API calls, and Amazon S3 will use the
+ // encryption settings values from the CreateSession request to protect new
+ // objects in the directory bucket.
+ //
+ // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the
+ // session token refreshes automatically to avoid service interruptions when a
+ // session expires. The CLI or the Amazon Web Services SDKs use the bucket's
+ // default encryption configuration for the CreateSession request. It's not
+ // supported to override the encryption settings values in the CreateSession
+ // request. So in the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]), the encryption
+ // request headers must match the default encryption configuration of the directory
+ // bucket.
+ //
+ // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
+ // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
+ // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ ServerSideEncryption types.ServerSideEncryption
+
+ // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ // objects. The STANDARD storage class provides high durability and high
+ // availability. Depending on performance needs, you can specify a different
+ // Storage Class. For more information, see [Storage Classes] in the Amazon S3 User Guide.
+ //
+ // - For directory buckets, only the S3 Express One Zone storage class is
+ // supported to store newly created objects.
+ //
+ // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ StorageClass types.StorageClass
+
+ // The tag-set for the object. The tag-set must be encoded as URL Query
+ // parameters. (For example, "Key1=Value1")
+ //
+ // This functionality is not supported for directory buckets.
+ Tagging *string
+
+ // If the bucket is configured as a website, redirects requests for this object to
+ // another object in the same bucket or to an external URL. Amazon S3 stores the
+ // value of this header in the object metadata. For information about object
+ // metadata, see [Object Key and Metadata] in the Amazon S3 User Guide.
+ //
+ // In the following example, the request header sets the redirect to an object
+ // (anotherPage.html) in the same bucket:
+ //
+ // x-amz-website-redirect-location: /anotherPage.html
+ //
+ // In the following example, the request header sets the object redirect to
+ // another website:
+ //
+ // x-amz-website-redirect-location: http://www.example.com/
+ //
+ // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the
+ // Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+ // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+ // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+ WebsiteRedirectLocation *string
+
+ // Specifies the offset for appending data to existing objects in bytes. The
+ // offset must be equal to the size of the existing object being appended to. If no
+ // object exists, setting this header to 0 will create a new object.
+ //
+ // This functionality is only supported for objects in the Amazon S3 Express One
+ // Zone storage class in directory buckets.
+ WriteOffsetBytes *int64
+
+ noSmithyDocumentSerde
+}
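
The IfMatch and IfNoneMatch fields above give PutObject compare-and-swap semantics. Below is a minimal sketch of a create-only upload built on them, assuming a client created with s3.NewFromConfig; the error-code strings follow the 412/409 behavior described in the field docs above, and putIfAbsent is a hypothetical helper, not an SDK function.

    import (
    	"context"
    	"errors"
    	"io"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    	"github.com/aws/smithy-go"
    )

    // putIfAbsent uploads body only when key does not already exist, using the
    // If-None-Match semantics documented on PutObjectInput.
    func putIfAbsent(ctx context.Context, client *s3.Client, bucket, key string, body io.Reader) error {
    	_, err := client.PutObject(ctx, &s3.PutObjectInput{
    		Bucket:      aws.String(bucket),
    		Key:         aws.String(key),
    		Body:        body,
    		IfNoneMatch: aws.String("*"), // expects the '*' character, per the field docs
    	})
    	var apiErr smithy.APIError
    	if errors.As(err, &apiErr) {
    		switch apiErr.ErrorCode() {
    		case "PreconditionFailed":
    			// 412: an object with this key already exists.
    		case "ConditionalRequestConflict":
    			// 409: a concurrent write raced this upload; retry per the docs above.
    		}
    	}
    	return err
    }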
+
+func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type PutObjectOutput struct {
+
+ // Indicates whether the uploaded object uses an S3 Bucket Key for server-side
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This header is
+ // present if the object was uploaded with the CRC64NVME checksum algorithm, or if
+ // it was uploaded without a checksum (and Amazon S3 added the default checksum,
+ // CRC64NVME , to the uploaded object). For more information about how checksums
+ // are calculated with multipart uploads, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 digest of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 digest of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA256 *string
+
+ // This header specifies the checksum type of the object, which determines how
+ // part-level checksums are combined to create an object-level checksum for
+ // multipart objects. For PutObject uploads, the checksum type is always
+ // FULL_OBJECT . You can use this header as a data integrity check to verify that
+ // the checksum type that is received is the same checksum that was specified. For
+ // more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType types.ChecksumType
+
+ // Entity tag for the uploaded object.
+ //
+ // General purpose buckets - To ensure that data is not corrupted traversing the
+ // network, for objects where the ETag is the MD5 digest of the object, you can
+ // calculate the MD5 while putting an object to Amazon S3 and compare the returned
+ // ETag to the calculated MD5 value.
+ //
+ // Directory buckets - The ETag for the object in a directory bucket isn't the MD5
+ // digest of the object.
+ ETag *string
+
+ // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration] in the Amazon S3 User
+ // Guide), the response includes this header. It includes the expiry-date and
+ // rule-id key-value pairs that provide information about object expiration. The
+ // value of the rule-id is URL-encoded.
+ //
+ // Object expiration information is not returned in directory buckets, and this
+ // header returns the value "NotImplemented" in all responses for directory
+ // buckets.
+ //
+ // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+ Expiration *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use for
+ // object encryption. The value of this header is a Base64 encoded string of a
+ // UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ // This value is stored as object metadata and automatically gets passed on to
+ // Amazon Web Services KMS for future GetObject operations on this object.
+ SSEKMSEncryptionContext *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3.
+ ServerSideEncryption types.ServerSideEncryption
+
+ // The size of the object in bytes. This value is only present if you append to
+ // an object.
+ //
+ // This functionality is only supported for objects in the Amazon S3 Express One
+ // Zone storage class in directory buckets.
+ Size *int64
+
+ // Version ID of the object.
+ //
+ // If you enable versioning for a bucket, Amazon S3 automatically generates a
+ // unique version ID for the object being stored. Amazon S3 returns this ID in the
+ // response. When you enable versioning for a bucket, if Amazon S3 receives
+ // multiple write requests for the same object simultaneously, it stores all of the
+ // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets] in the Amazon S3 User
+ // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html
+ // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
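
To see the checksum fields above round-trip, a request can pin ChecksumAlgorithm and read back the matching output member. A sketch under the same assumptions as the earlier putIfAbsent example, additionally importing fmt, strings, and the s3 types package; the bucket and key names are placeholders.

    out, err := client.PutObject(ctx, &s3.PutObjectInput{
    	Bucket:            aws.String("example-bucket"), // placeholder
    	Key:               aws.String("example-key"),    // placeholder
    	Body:              strings.NewReader("hello world"),
    	ChecksumAlgorithm: types.ChecksumAlgorithmCrc32c,
    })
    if err != nil {
    	return err
    }
    // Single-part PutObject uploads always use the FULL_OBJECT checksum type, so
    // ChecksumCRC32C is a digest of the entire body rather than a composite of parts.
    fmt.Printf("etag=%s crc32c=%s\n", aws.ToString(out.ETag), aws.ToString(out.ChecksumCRC32C))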
+
+func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutObject"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = add100Continue(stack, options); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObject",
+ }
+}
+
+// getPutObjectRequestAlgorithmMember gets the request checksum algorithm value
+// provided as input.
+func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectRequestAlgorithmMember,
+ RequireChecksum: false,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: true,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectBucketMember returns a pointer to string denoting a provided bucket
+// member value and a boolean indicating if the input has a modeled bucket name.
+func getPutObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignPutObject generates a presigned HTTP request that contains the
+// presigned URL, the signed headers, and the HTTP method to use.
+func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &PutObjectInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns,
+ c.client.addOperationPutObjectMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ addPutObjectPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addPutObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
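
PresignPutObject above swaps in a no-op HTTP client, disables default checksums, and signs the payload as UNSIGNED-PAYLOAD via addPutObjectPayloadAsUnsigned, so any HTTP caller can complete the upload. A hedged sketch of generating and consuming such a URL; the 15-minute expiry and names are illustrative, and net/http, strings, and time are assumed imported alongside the earlier packages.

    presigner := s3.NewPresignClient(client)
    req, err := presigner.PresignPutObject(ctx, &s3.PutObjectInput{
    	Bucket: aws.String("example-bucket"), // placeholder
    	Key:    aws.String("example-key"),    // placeholder
    }, s3.WithPresignExpires(15*time.Minute))
    if err != nil {
    	return err
    }
    // The presigned request carries everything a credential-less caller needs:
    // method, URL, and the headers that were covered by the signature.
    httpReq, err := http.NewRequestWithContext(ctx, req.Method, req.URL, strings.NewReader("payload"))
    if err != nil {
    	return err
    }
    httpReq.Header = req.SignedHeader
    resp, err := http.DefaultClient.Do(httpReq)
    if err != nil {
    	return err
    }
    defer resp.Body.Close()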
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
new file mode 100644
index 000000000..898d6ce7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
@@ -0,0 +1,503 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions for a
+// new or existing object in an S3 bucket. You must have the WRITE_ACP permission
+// to set the ACL of an object. For more information, see [What permissions can I grant?] in the Amazon S3 User
+// Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// Depending on your application needs, you can choose to set the ACL on an object
+// using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, you can
+// continue to use that approach. For more information, see [Access Control List (ACL) Overview] in the Amazon S3 User
+// Guide.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies to
+// grant access to your bucket and the objects in it. Requests to set ACLs or
+// update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see [Controlling object ownership] in the
+// Amazon S3 User Guide.
+//
+// Permissions You can set access permissions using one of the following methods:
+//
+// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
+// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
+// set of grantees and permissions. Specify the canned ACL name as the value of
+// x-amz-acl . If you use this header, you cannot use other access
+// control-specific headers in your request. For more information, see [Canned ACL].
+//
+// - Specify access permissions explicitly with the x-amz-grant-read ,
+// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
+// headers. When using these headers, you specify explicit access permissions and
+// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+// permission. If you use these ACL-specific headers, you cannot use the x-amz-acl
+// header to set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview].
+//
+// You specify each grantee as a type=value pair, where the type is one of the
+//
+// following:
+//
+// - id – if the value specified is the canonical user ID of an Amazon Web
+// Services account
+//
+// - uri – if you are granting permissions to a predefined group
+//
+// - emailAddress – if the value specified is the email address of an Amazon Web
+// Services account
+//
+// Using email addresses to specify a grantee is only supported in the following
+//
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+//
+// Amazon Web Services General Reference.
+//
+// For example, the following x-amz-grant-read header grants list objects
+//
+// permission to the two Amazon Web Services accounts identified by their email
+// addresses.
+//
+// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly. You
+// cannot do both.
+//
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (using request elements) in the following ways:
+//
+// - By the person's ID:
+//
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+//
+// DisplayName is optional and ignored in the request.
+//
+// - By URI:
+//
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// - By Email address:
+//
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+//
+// acl request, appears as the CanonicalUser.
+//
+// Using email addresses to specify a grantee is only supported in the following
+//
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+//
+// Amazon Web Services General Reference.
+//
+// Versioning The ACL of an object is set at the object version level. By default,
+// PUT sets the ACL of the current version of an object. To set the ACL of a
+// different version, use the versionId subresource.
+//
+// The following operations are related to PutObjectAcl :
+//
+// [CopyObject]
+//
+// [GetObject]
+//
+// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) {
+ if params == nil {
+ params = &PutObjectAclInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectAcl", params, optFns, c.addOperationPutObjectAclMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectAclOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectAclInput struct {
+
+ // The bucket name that contains the object to which you want to attach the ACL.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?] in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Key for which the PUT action was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // The canned ACL to apply to the object. For more information, see [Canned ACL].
+ //
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ ACL types.ObjectCannedACL
+
+ // Contains the elements that set the ACL permissions for an object per grantee.
+ AccessControlPolicy *types.AccessControlPolicy
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as
+ // a message integrity check to verify that the request body was not corrupted in
+ // transit. For more information, go to [RFC 1864].
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
+ GrantFullControl *string
+
+ // Allows grantee to list the objects in the bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
+ GrantRead *string
+
+ // Allows grantee to read the bucket ACL.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
+ GrantReadACP *string
+
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
+ GrantWrite *string
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
+ GrantWriteACP *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Version ID used to reference a specific version of the object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type PutObjectAclOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectAcl{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectAcl"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutObjectAclValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectAcl(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutObjectAclUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutObjectAclInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObjectAcl",
+ }
+}
+
+// getPutObjectAclRequestAlgorithmMember gets the request checksum algorithm value
+// provided as input.
+func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectAclInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectAclRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectAclBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getPutObjectAclBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectAclInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectAclUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectAclBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
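
A brief sketch of the canned-ACL path documented above, under the same assumptions as the earlier examples (client, imports, placeholder names); it succeeds only on buckets whose Object Ownership setting still permits ACLs.

    _, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
    	Bucket: aws.String("example-bucket"), // placeholder
    	Key:    aws.String("example-key"),    // placeholder
    	ACL:    types.ObjectCannedACLPublicRead, // canned ACL and grant headers are mutually exclusive
    })
    // Buckets with bucket-owner-enforced ownership reject this with
    // AccessControlListNotSupported, per the operation docs above.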
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
new file mode 100644
index 000000000..a72024876
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go
@@ -0,0 +1,319 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Applies a legal hold configuration to the specified object. For more
+// information, see [Locking Objects].
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) {
+ if params == nil {
+ params = &PutObjectLegalHoldInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectLegalHold", params, optFns, c.addOperationPutObjectLegalHoldMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectLegalHoldOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutObjectLegalHoldInput struct {
+
+ // The bucket name containing the object that you want to place a legal hold on.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points] in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object that you want to place a legal hold on.
+ //
+ // This member is required.
+ Key *string
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Container element for the legal hold configuration you want to apply to the
+ // specified object.
+ LegalHold *types.ObjectLockLegalHold
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The version ID of the object that you want to place a legal hold on.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type PutObjectLegalHoldOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
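
A sketch of applying the legal hold modeled by the input above, with the same assumptions as the earlier examples; VersionId may additionally be set to target a specific object version.

    _, err := client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
    	Bucket: aws.String("example-bucket"), // placeholder
    	Key:    aws.String("example-key"),    // placeholder
    	LegalHold: &types.ObjectLockLegalHold{
    		Status: types.ObjectLockLegalHoldStatusOn, // ...StatusOff lifts the hold
    	},
    })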
+
+func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLegalHold{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLegalHold"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLegalHold(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutObjectLegalHoldUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutObjectLegalHoldInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObjectLegalHold",
+ }
+}
+
+// getPutObjectLegalHoldRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectLegalHoldInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectLegalHoldBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutObjectLegalHoldBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectLegalHoldInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectLegalHoldUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectLegalHoldBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
new file mode 100644
index 000000000..e1d6a1888
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go
@@ -0,0 +1,310 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Places an Object Lock configuration on the specified bucket. The rule specified
+// in the Object Lock configuration will be applied by default to every new object
+// placed in the specified bucket. For more information, see [Locking Objects].
+//
+// - The DefaultRetention settings require both a mode and a period.
+//
+// - The DefaultRetention period can be either Days or Years but you must select
+// one. You cannot specify Days and Years at the same time.
+//
+// - You can enable Object Lock for new or existing buckets. For more
+// information, see [Configuring Object Lock].
+//
+// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) {
+ if params == nil {
+ params = &PutObjectLockConfigurationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectLockConfiguration", params, optFns, c.addOperationPutObjectLockConfigurationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectLockConfigurationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
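+
+// Usage sketch (editorial illustration; not emitted by smithy-go-codegen):
+// enabling Object Lock with a default 30-day GOVERNANCE retention rule. Per
+// the doc above, DefaultRetention needs both a mode and exactly one of Days or
+// Years. The bucket name and cfg are placeholders.
+//
+//	client := s3.NewFromConfig(cfg)
+//	_, err := client.PutObjectLockConfiguration(context.TODO(), &s3.PutObjectLockConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		ObjectLockConfiguration: &types.ObjectLockConfiguration{
+//			ObjectLockEnabled: types.ObjectLockEnabledEnabled,
+//			Rule: &types.ObjectLockRule{
+//				DefaultRetention: &types.DefaultRetention{
+//					Mode: types.ObjectLockRetentionModeGovernance,
+//					Days: aws.Int32(30), // Days or Years, never both
+//				},
+//			},
+//		},
+//	})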
+
+type PutObjectLockConfigurationInput struct {
+
+ // The bucket whose Object Lock configuration you want to create or replace.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The Object Lock configuration that you want to apply to the specified bucket.
+ ObjectLockConfiguration *types.ObjectLockConfiguration
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // A token to allow Object Lock to be enabled for an existing bucket.
+ Token *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type PutObjectLockConfigurationOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectLockConfiguration{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLockConfiguration"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectLockConfiguration(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutObjectLockConfigurationUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutObjectLockConfigurationInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObjectLockConfiguration",
+ }
+}
+
+// getPutObjectLockConfigurationRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectLockConfigurationInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting
+// a provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectLockConfigurationInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectLockConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectLockConfigurationBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
new file mode 100644
index 000000000..3c7cde265
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go
@@ -0,0 +1,326 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Places an Object Retention configuration on an object. For more information,
+// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order
+// to place an Object Retention configuration on objects. Bypassing a Governance
+// Retention configuration requires the s3:BypassGovernanceRetention permission.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
+func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) {
+ if params == nil {
+ params = &PutObjectRetentionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectRetention", params, optFns, c.addOperationPutObjectRetentionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectRetentionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
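+
+// Usage sketch (editorial illustration; not emitted by smithy-go-codegen):
+// placing a GOVERNANCE-mode retention period on a single object. Bucket, key,
+// and cfg are placeholders.
+//
+//	client := s3.NewFromConfig(cfg)
+//	_, err := client.PutObjectRetention(context.TODO(), &s3.PutObjectRetentionInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//		Retention: &types.ObjectLockRetention{
+//			Mode:            types.ObjectLockRetentionModeGovernance,
+//			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//		},
+//	})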
+
+type PutObjectRetentionInput struct {
+
+ // The bucket name that contains the object you want to apply this Object
+ // Retention configuration to.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // The key name for the object that you want to apply this Object Retention
+ // configuration to.
+ //
+ // This member is required.
+ Key *string
+
+ // Indicates whether this action should bypass Governance-mode restrictions.
+ BypassGovernanceRetention *bool
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The container element for the Object Retention configuration.
+ Retention *types.ObjectLockRetention
+
+ // The version ID for the object that you want to apply this Object Retention
+ // configuration to.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type PutObjectRetentionOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectRetention{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectRetention"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectRetention(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutObjectRetentionUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutObjectRetentionInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObjectRetention",
+ }
+}
+
+// getPutObjectRetentionRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectRetentionInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectRetentionBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutObjectRetentionBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectRetentionInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectRetentionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectRetentionBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
new file mode 100644
index 000000000..6de5386c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go
@@ -0,0 +1,361 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Sets the supplied tag-set to an object that already exists in a bucket. A tag
+// is a key-value pair. For more information, see [Object Tagging].
+//
+// You can associate tags with an object by sending a PUT request against the
+// tagging subresource that is associated with the object. You can retrieve tags by
+// sending a GET request. For more information, see [GetObjectTagging].
+//
+// For tagging-related restrictions on characters and encodings, see [Tag Restrictions].
+// Note that Amazon S3 limits the maximum number of tags to 10 per object.
+//
+// To use this operation, you must have permission to perform the
+// s3:PutObjectTagging action. By default, the bucket owner has this permission and
+// can grant this permission to others.
+//
+// To put tags of any other version, use the versionId query parameter. You also
+// need permission for the s3:PutObjectVersionTagging action.
+//
+// PutObjectTagging has the following special errors. For more Amazon S3 errors
+// see, [Error Responses].
+//
+// - InvalidTag - The tag provided was not a valid tag. This error can occur if
+// the tag did not pass input validation. For more information, see [Object Tagging].
+//
+// - MalformedXML - The XML provided does not match the schema.
+//
+// - OperationAborted - A conflicting conditional action is currently in progress
+// against this resource. Please try again.
+//
+// - InternalError - The service was unable to apply the provided tag to the
+// object.
+//
+// The following operations are related to PutObjectTagging :
+//
+// [GetObjectTagging]
+//
+// [DeleteObjectTagging]
+//
+// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
+func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) {
+ if params == nil {
+ params = &PutObjectTaggingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutObjectTagging", params, optFns, c.addOperationPutObjectTaggingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutObjectTaggingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
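+
+// Usage sketch (editorial illustration; not emitted by smithy-go-codegen):
+// replacing an object's tag-set with a single key-value pair. Bucket, key, tag
+// values, and cfg are placeholders.
+//
+//	client := s3.NewFromConfig(cfg)
+//	out, err := client.PutObjectTagging(context.TODO(), &s3.PutObjectTaggingInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//		Tagging: &types.Tagging{
+//			TagSet: []types.Tag{
+//				{Key: aws.String("project"), Value: aws.String("stash")},
+//			},
+//		},
+//	})
+//	// out.VersionId reports the version the tag-set was applied to.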
+
+type PutObjectTaggingInput struct {
+
+ // The bucket name containing the object.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Name of the object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Container for the TagSet and Tag elements
+ //
+ // This member is required.
+ Tagging *types.Tagging
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The MD5 hash for the request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // The versionId of the object that the tag-set will be added to.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type PutObjectTaggingOutput struct {
+
+ // The versionId of the object the tag-set was added to.
+ VersionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutObjectTagging{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectTagging"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutObjectTagging(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutObjectTaggingUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutObjectTaggingInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutObjectTagging",
+ }
+}
+
+// getPutObjectTaggingRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutObjectTaggingBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutObjectTaggingBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutObjectTaggingInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutObjectTaggingUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutObjectTaggingBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
new file mode 100644
index 000000000..114bcc149
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go
@@ -0,0 +1,313 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
+// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
+// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy].
+//
+// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an
+// object, it checks the PublicAccessBlock configuration for both the bucket (or
+// the bucket that contains the object) and the bucket owner's account. If the
+// PublicAccessBlock configurations are different between the bucket and the
+// account, Amazon S3 uses the most restrictive combination of the bucket-level and
+// account-level settings.
+//
+// For more information about when Amazon S3 considers a bucket or an object
+// public, see [The Meaning of "Public"].
+//
+// The following operations are related to PutPublicAccessBlock :
+//
+// [GetPublicAccessBlock]
+//
+// [DeletePublicAccessBlock]
+//
+// [GetBucketPolicyStatus]
+//
+// [Using Amazon S3 Block Public Access]
+//
+// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html
+// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html
+// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html
+// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) {
+ if params == nil {
+ params = &PutPublicAccessBlockInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutPublicAccessBlock", params, optFns, c.addOperationPutPublicAccessBlockMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutPublicAccessBlockOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
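+
+// Usage sketch (editorial illustration; not emitted by smithy-go-codegen):
+// blocking all forms of public access on a bucket. The bucket name and cfg are
+// placeholders; the four settings may be enabled in any combination.
+//
+//	client := s3.NewFromConfig(cfg)
+//	_, err := client.PutPublicAccessBlock(context.TODO(), &s3.PutPublicAccessBlockInput{
+//		Bucket: aws.String("example-bucket"),
+//		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
+//			BlockPublicAcls:       aws.Bool(true),
+//			BlockPublicPolicy:     aws.Bool(true),
+//			IgnorePublicAcls:      aws.Bool(true),
+//			RestrictPublicBuckets: aws.Bool(true),
+//		},
+//	})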
+
+type PutPublicAccessBlockInput struct {
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you want
+ // to set.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The PublicAccessBlock configuration that you want to apply to this Amazon S3
+ // bucket. You can enable the configuration options in any combination. For more
+ // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in
+ // the Amazon S3 User Guide.
+ //
+ // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+ //
+ // This member is required.
+ PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The MD5 hash of the PutPublicAccessBlock request body.
+ //
+ // For requests made using the Amazon Web Services Command Line Interface (CLI) or
+ // Amazon Web Services SDKs, this field is calculated automatically.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
+type PutPublicAccessBlockOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpPutPublicAccessBlock{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutPublicAccessBlock"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutPublicAccessBlock(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addPutPublicAccessBlockUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *PutPublicAccessBlockInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutPublicAccessBlock",
+ }
+}
+
+// getPutPublicAccessBlockRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
+func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember,
+ RequireChecksum: true,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getPutPublicAccessBlockBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a
+// modeled bucket name.
+func getPutPublicAccessBlockBucketMember(input interface{}) (*string, bool) {
+ in := input.(*PutPublicAccessBlockInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addPutPublicAccessBlockUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getPutPublicAccessBlockBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
new file mode 100644
index 000000000..69a7f4ffb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go
@@ -0,0 +1,460 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation is not supported for directory buckets.
+//
+// # Restores an archived copy of an object back into Amazon S3
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// This action performs the following types of requests:
+//
+// - restore an archive - Restore an archived object
+//
+// For more information about the S3 structure in the request body, see the
+// following:
+//
+// [PutObject]
+//
+// [Managing Access with ACLs] in the Amazon S3 User Guide
+//
+// [Protecting Data Using Server-Side Encryption] in the Amazon S3 User Guide
+//
+// # Permissions
+//
+// To use this operation, you must have permissions to perform the
+// s3:RestoreObject action. The bucket owner has this permission by default and can
+// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations]
+// and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide.
+//
+// # Restoring objects
+//
+// Objects that you archive to the S3 Glacier Flexible
+// Retrieval or S3 Glacier Deep Archive storage class, and S3
+// Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are
+// not accessible in real time. For objects in the S3 Glacier Flexible Retrieval
+// or S3 Glacier Deep Archive storage classes, you must first
+// initiate a restore request, and then wait until a temporary copy of the object
+// is available. If you want a permanent copy of the object, create a copy of it in
+// the Amazon S3 Standard storage class in your S3 bucket. To access an archived
+// object, you must restore the object for the duration (number of days) that you
+// specify. For objects in the Archive Access or Deep Archive Access tiers of S3
+// Intelligent-Tiering, you must first initiate a restore request, and then wait
+// until the object is moved into the Frequent Access tier.
+//
+// To restore a specific object version, you can provide a version ID. If you
+// don't provide a version ID, Amazon S3 restores the current version.
+//
+// When restoring an archived object, you can specify one of the following data
+// access tier options in the Tier element of the request body:
+//
+// - Expedited - Expedited retrievals allow you to quickly access your data
+// stored in the S3 Glacier Flexible Retrieval storage class or
+// S3 Intelligent-Tiering Archive tier when occasional urgent requests for
+// restoring archives are required. For all but the largest archived objects (250
+// MB+), data accessed using Expedited retrievals is typically made available
+// within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for
+// Expedited retrievals is available when you need it. Expedited retrievals and
+// provisioned capacity are not available for objects stored in the S3 Glacier Deep
+// Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
+//
+// - Standard - Standard retrievals allow you to access any of your archived
+// objects within several hours. This is the default option for retrieval requests
+// that do not specify the retrieval option. Standard retrievals typically finish
+// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval
+// storage class or S3 Intelligent-Tiering Archive tier. They
+// typically finish within 12 hours for objects stored in the S3 Glacier Deep
+// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard
+// retrievals are free for objects stored in S3 Intelligent-Tiering.
+//
+// - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible
+// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve
+// large amounts, even petabytes, of data at no cost. Bulk retrievals typically
+// finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval
+// storage class or S3 Intelligent-Tiering Archive tier. Bulk
+// retrievals are also the lowest-cost retrieval option when restoring objects from
+// S3 Glacier Deep Archive. They typically finish within 48 hours for objects
+// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering
+// Deep Archive tier.
+//
+// For more information about archive retrieval options and provisioned capacity
+// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide.
+//
+// You can use Amazon S3 restore speed upgrade to change the restore speed to a
+// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon
+// S3 User Guide.
+//
+// To get the status of object restoration, you can send a HEAD request.
+// Operations return the x-amz-restore header, which provides information about
+// the restoration status, in the response. You can use Amazon S3 event
+// notifications to notify you when a restore is initiated or completed. For more
+// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide.
+//
+// After restoring an archived object, you can update the restoration period by
+// reissuing the request with a new period. Amazon S3 updates the restoration
+// period relative to the current time and charges only for the request; there are
+// no data transfer charges. You cannot update the restoration period when Amazon
+// S3 is actively processing your current restore request for the object.
+//
+// If your bucket has a lifecycle configuration with a rule that includes an
+// expiration action, the object expiration overrides the life span that you
+// specify in a restore request. For example, if you restore an object copy for 10
+// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the
+// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management]
+// in the Amazon S3 User Guide.
+//
+// Responses A successful action returns either the 200 OK or 202 Accepted status
+// code.
+//
+// - If the object is not previously restored, then Amazon S3 returns 202
+// Accepted in the response.
+//
+// - If the object is previously restored, Amazon S3 returns 200 OK in the
+// response.
+//
+// - Special errors:
+//
+// - Code: RestoreAlreadyInProgress
+//
+// - Cause: Object restore is already in progress.
+//
+// - HTTP Status Code: 409 Conflict
+//
+// - SOAP Fault Code Prefix: Client
+//
+// - Code: GlacierExpeditedRetrievalNotAvailable
+//
+// - Cause: Expedited retrievals are currently not available. Try again later.
+// (Returned if there is insufficient capacity to process the Expedited request.
+// This error applies only to Expedited retrievals and not to S3 Standard or Bulk
+// retrievals.)
+//
+// - HTTP Status Code: 503
+//
+// - SOAP Fault Code Prefix: N/A
+//
+// The following operations are related to RestoreObject :
+//
+// [PutBucketLifecycleConfiguration]
+//
+// [GetBucketNotificationConfiguration]
+//
+// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources
+// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
+// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html
+// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html
+// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html
+// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html
+func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) {
+ if params == nil {
+ params = &RestoreObjectInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "RestoreObject", params, optFns, c.addOperationRestoreObjectMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*RestoreObjectOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
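+
+// A hand-written usage sketch (not emitted by smithy-go-codegen); the bucket,
+// key, number of days, and retrieval tier below are illustrative assumptions:
+//
+//	out, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical bucket
+//		Key:    aws.String("archive/report.csv"),  // hypothetical key
+//		RestoreRequest: &types.RestoreRequest{
+//			Days: aws.Int32(2), // keep the temporary copy for two days
+//			GlacierJobParameters: &types.GlacierJobParameters{
+//				Tier: types.TierStandard, // TierExpedited and TierBulk are also valid
+//			},
+//		},
+//	})
+//	if err != nil {
+//		return err // e.g. RestoreAlreadyInProgress (HTTP 409 Conflict)
+//	}
+//	_ = out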
+
+type RestoreObjectInput struct {
+
+ // The bucket name containing the object to restore.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the action was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Container for restore job parameters.
+ RestoreRequest *types.RestoreRequest
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type RestoreObjectOutput struct {
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Indicates the path in the provided S3 output location where Select results will
+ // be restored to.
+ RestoreOutputPath *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpRestoreObject{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreObject"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpRestoreObjectValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreObject(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addRestoreObjectUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *RestoreObjectInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "RestoreObject",
+ }
+}
+
+// getRestoreObjectRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
+func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*RestoreObjectInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getRestoreObjectRequestAlgorithmMember,
+ RequireChecksum: false,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: false,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
+// getRestoreObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getRestoreObjectBucketMember(input interface{}) (*string, bool) {
+ in := input.(*RestoreObjectInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addRestoreObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getRestoreObjectBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
new file mode 100644
index 000000000..2dfa63c6e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go
@@ -0,0 +1,488 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithysync "github.com/aws/smithy-go/sync"
+ "sync"
+)
+
+// This operation is not supported for directory buckets.
+//
+// This action filters the contents of an Amazon S3 object based on a simple
+// structured query language (SQL) statement. In the request, along with the SQL
+// expression, you must also specify a data serialization format (JSON, CSV, or
+// Apache Parquet) of the object. Amazon S3 uses this format to parse object data
+// into records, and returns only records that match the specified SQL expression.
+// You must also specify the data serialization format for the response.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User
+// Guide.
+//
+// Permissions You must have the s3:GetObject permission for this operation.
+// Amazon S3 Select does not support anonymous access. For more information about
+// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide.
+//
+// Object Data Formats You can use Amazon S3 Select to query objects that have the
+// following format properties:
+//
+// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
+//
+// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
+//
+// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2.
+// GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports
+// for CSV and JSON files. Amazon S3 Select supports columnar compression for
+// Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object
+// compression for Parquet objects.
+//
+// - Server-side encryption - Amazon S3 Select supports querying objects that
+// are protected with server-side encryption.
+//
+// For objects that are encrypted with customer-provided encryption keys (SSE-C),
+//
+// you must use HTTPS, and you must use the headers that are documented in [GetObject].
+// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide.
+//
+// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon
+//
+// Web Services KMS keys (SSE-KMS), server-side encryption is handled
+// transparently, so you don't need to specify anything. For more information about
+// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3
+// User Guide.
+//
+// Working with the Response Body Given the response size is unknown, Amazon S3
+// Select streams the response as a series of messages and includes a
+// Transfer-Encoding header with chunked as its value in the response. For more
+// information, see [Appendix: SelectObjectContent Response].
+//
+// GetObject Support The SelectObjectContent action does not support the following
+// GetObject functionality. For more information, see [GetObject].
+//
+// - Range : Although you can specify a scan range for an Amazon S3 Select
+// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of
+// bytes of an object to return.
+//
+// - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the
+// ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING
+// storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or
+// REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or
+// DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For
+// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide.
+//
+// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes]
+//
+// The following operations are related to SelectObjectContent :
+//
+// [GetObject]
+//
+// [GetBucketLifecycleConfiguration]
+//
+// [PutBucketLifecycleConfiguration]
+//
+// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html
+// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html
+// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
+// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange
+// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList
+// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html
+// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html
+// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
+//
+// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
+func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) {
+ if params == nil {
+ params = &SelectObjectContentInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "SelectObjectContent", params, optFns, c.addOperationSelectObjectContentMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*SelectObjectContentOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
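+
+// A hand-written usage sketch (not emitted by smithy-go-codegen); the bucket,
+// key, and SQL expression are illustrative assumptions:
+//
+//	resp, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
+//		Bucket:         aws.String("amzn-s3-demo-bucket"), // hypothetical bucket
+//		Key:            aws.String("data.csv"),            // hypothetical key
+//		Expression:     aws.String("SELECT * FROM S3Object s LIMIT 5"),
+//		ExpressionType: types.ExpressionTypeSql,
+//		InputSerialization: &types.InputSerialization{
+//			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
+//		},
+//		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.GetStream().Close()
+//	for event := range resp.GetStream().Events() {
+//		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
+//			_ = records.Value.Payload // raw record bytes for this event
+//		}
+//	}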
+
+// Amazon S3 Select is no longer available to new customers. Existing
+// customers of Amazon S3 Select can continue to use the feature as usual. [Learn more]
+//
+// Request to filter the contents of an Amazon S3 object based on a simple
+// Structured Query Language (SQL) statement. In the request, along with the SQL
+// expression, you must specify a data serialization format (JSON or CSV) of the
+// object. Amazon S3 uses this to parse object data into records. It returns only
+// records that match the specified SQL expression. You must also specify the data
+// serialization format for the response. For more information, see [S3Select API Documentation].
+//
+// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/
+// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+type SelectObjectContentInput struct {
+
+ // The S3 bucket.
+ //
+ // This member is required.
+ Bucket *string
+
+ // The expression that is used to query the object.
+ //
+ // This member is required.
+ Expression *string
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // This member is required.
+ ExpressionType types.ExpressionType
+
+ // Describes the format of the data in the object that is being queried.
+ //
+ // This member is required.
+ InputSerialization *types.InputSerialization
+
+ // The object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Describes the format of the data that you want Amazon S3 to return in response.
+ //
+ // This member is required.
+ OutputSerialization *types.OutputSerialization
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Specifies if periodic request progress information should be enabled.
+ RequestProgress *types.RequestProgress
+
+ // The server-side encryption (SSE) algorithm used to encrypt the object. This
+ // parameter is needed only when the object was created using a checksum algorithm.
+ // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerAlgorithm *string
+
+ // The server-side encryption (SSE) customer managed key. This parameter is needed
+ // only when the object was created using a checksum algorithm. For more
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKey *string
+
+ // The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ // needed only when the object was created using a checksum algorithm. For more
+ // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide.
+ //
+ // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKeyMD5 *string
+
+ // Specifies the byte range of the object to get the records from. A record is
+ // processed when its first byte is contained by the range. This parameter is
+ // optional, but when specified, it must not be empty. See RFC 2616, Section
+ // 14.35.1 about how to specify the start and end of the range.
+ //
+ // ScanRange may be used in the following ways:
+ //
+ // - 50100 - process only the records starting between the bytes 50 and 100
+ // (inclusive, counting from zero)
+ //
+ // - 50 - process only the records starting after the byte 50
+ //
+ // - 50 - process only the records within the last 50 bytes of the file.
+ ScanRange *types.ScanRange
+
+ noSmithyDocumentSerde
+}
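+
+// A hand-written illustration (not generated): restricting a Select query to
+// the byte range 50..100 of the object via the ScanRange field, where input is
+// a *SelectObjectContentInput being prepared for the call:
+//
+//	input.ScanRange = &types.ScanRange{
+//		Start: aws.Int64(50),  // first byte of the range, counting from zero
+//		End:   aws.Int64(100), // last byte of the range (inclusive)
+//	}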
+
+func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+
+}
+
+type SelectObjectContentOutput struct {
+ eventStream *SelectObjectContentEventStream
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+// GetStream returns the type to interact with the event stream.
+func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream {
+ return o.eventStream
+}
+
+func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpSelectObjectContent{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "SelectObjectContent"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSelectObjectContent(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *SelectObjectContentInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "SelectObjectContent",
+ }
+}
+
+// getSelectObjectContentBucketMember returns a pointer to string denoting a
+// provided bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getSelectObjectContentBucketMember(input interface{}) (*string, bool) {
+ in := input.(*SelectObjectContentInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addSelectObjectContentUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getSelectObjectContentBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation.
+//
+// For testing and mocking the event stream, this type should be initialized via
+// the NewSelectObjectContentEventStream constructor function, using the
+// functional options to pass in nested mock behavior.
+type SelectObjectContentEventStream struct {
+ // SelectObjectContentEventStreamReader is the EventStream reader for the
+ // SelectObjectContentEventStream events. This value is automatically set by the
+// SDK when the API call is made. Use this member when unit testing your code with
+ // the SDK to mock out the EventStream Reader.
+ //
+ // Must not be nil.
+ Reader SelectObjectContentEventStreamReader
+
+ done chan struct{}
+ closeOnce sync.Once
+ err *smithysync.OnceErr
+}
+
+// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream.
+// This function should only be used for testing and mocking the SelectObjectContentEventStream
+// stream within your application.
+//
+// The Reader member must be set before reading events from the stream.
+func NewSelectObjectContentEventStream(optFns ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
+ es := &SelectObjectContentEventStream{
+ done: make(chan struct{}),
+ err: smithysync.NewOnceErr(),
+ }
+ for _, fn := range optFns {
+ fn(es)
+ }
+ return es
+}
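+
+// A hand-written sketch (not generated) of wiring a mocked reader in a unit
+// test; mockReader is a hypothetical type implementing the
+// SelectObjectContentEventStreamReader interface:
+//
+//	es := NewSelectObjectContentEventStream(func(es *SelectObjectContentEventStream) {
+//		es.Reader = &mockReader{} // satisfies the "must not be nil" requirement
+//	})
+//	defer es.Close()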
+
+// Events returns a channel to read events from.
+func (es *SelectObjectContentEventStream) Events() <-chan types.SelectObjectContentEventStream {
+ return es.Reader.Events()
+}
+
+// Close closes the stream. Close must be called when done using the stream API.
+// Not calling Close may result in resource leaks.
+//
+// Close shuts down the underlying EventStream writer and reader, and no more
+// events can be sent or received.
+func (es *SelectObjectContentEventStream) Close() error {
+ es.closeOnce.Do(es.safeClose)
+ return es.Err()
+}
+
+func (es *SelectObjectContentEventStream) safeClose() {
+ close(es.done)
+
+ es.Reader.Close()
+}
+
+// Err returns any error that occurred while reading or writing EventStream Events
+// from the service API's response. Returns nil if there were no errors.
+func (es *SelectObjectContentEventStream) Err() error {
+ if err := es.err.Err(); err != nil {
+ return err
+ }
+
+ if err := es.Reader.Err(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (es *SelectObjectContentEventStream) waitStreamClose() {
+ type errorSet interface {
+ ErrorSet() <-chan struct{}
+ }
+
+ var outputErrCh <-chan struct{}
+ if v, ok := es.Reader.(errorSet); ok {
+ outputErrCh = v.ErrorSet()
+ }
+ var outputClosedCh <-chan struct{}
+ if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
+ outputClosedCh = v.Closed()
+ }
+
+ select {
+ case <-es.done:
+ case <-outputErrCh:
+ es.err.SetError(es.Reader.Err())
+ es.Close()
+
+ case <-outputClosedCh:
+ if err := es.Reader.Err(); err != nil {
+ es.err.SetError(es.Reader.Err())
+ }
+ es.Close()
+
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
new file mode 100644
index 000000000..624a67b64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go
@@ -0,0 +1,689 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+// Uploads a part in a multipart upload.
+//
+// In this operation, you provide new data as a part of an object in your request.
+// However, you have an option to specify your existing Amazon S3 object as a data
+// source for the part you are uploading. To upload a part from an existing object,
+// you use the [UploadPartCopy]operation.
+//
+// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In
+// response to your initiate request, Amazon S3 returns an upload ID, a unique
+// identifier that you must include in your upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object being
+// created. If you upload a new part using the same part number that was used with
+// a previous part, the previously uploaded part is overwritten.
+//
+// For information about maximum and minimum part sizes and other multipart upload
+// specifications, see [Multipart upload limits]in the Amazon S3 User Guide.
+//
+// After you initiate a multipart upload and upload one or more parts, you must
+// either complete or abort the multipart upload in order to stop getting charged
+// for storage of the uploaded parts. Only after you either complete or abort the
+// multipart upload does Amazon S3 free up the parts storage and stop charging you
+// for the parts storage.
+//
+// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide .
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+// - General purpose bucket permissions - To perform a multipart upload with
+// encryption using an Key Management Service key, the requester must have
+// permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The
+// requester must also have permissions for the kms:GenerateDataKey action for
+// the CreateMultipartUpload API. Then, the requester needs permissions for the
+// kms:Decrypt action on the UploadPart and UploadPartCopy APIs.
+//
+// These permissions are required because Amazon S3 must decrypt and read data
+//
+// from the encrypted file parts before it completes the multipart upload. For more
+// information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For
+// information about the permissions required to use the multipart upload API, see [Multipart upload and permissions]
+// and [Multipart upload API and permissions]in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation on a
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
+// s3express:CreateSession permission to the directory bucket in a bucket policy
+// or an IAM identity-based policy. Then, you make the CreateSession API call on
+// the bucket to obtain a session token. With the session token in your request
+// header, you can make API requests to this operation. After the session token
+// expires, you make another CreateSession API call to generate a new session
+// token for use. Amazon Web Services CLI or SDKs create session and refresh the
+// session token automatically to avoid service interruptions when a session
+// expires. For more information about authorization, see [CreateSession].
+//
+// If the object is encrypted with SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+// Data integrity General purpose bucket - To ensure that data is not corrupted
+// traversing the network, specify the Content-MD5 header in the upload part
+// request. Amazon S3 checks the part data against the provided MD5 value. If they
+// do not match, Amazon S3 returns an error. If the upload request is signed with
+// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256
+// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)].
+//
+// Directory buckets - MD5 is not supported by directory buckets. You can use
+// checksum algorithms to check object integrity.
+//
+// Encryption
+// - General purpose bucket - Server-side encryption is for data encryption at
+// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. You have mutually exclusive options to
+// protect data using server-side encryption in Amazon S3, depending on how you
+// choose to manage the encryption keys. Specifically, the encryption key options
+// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and
+// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side
+// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally
+// tell Amazon S3 to encrypt data at rest using server-side encryption with other
+// key options. The option you use depends on whether you want to use KMS keys
+// (SSE-KMS) or provide your own encryption key (SSE-C).
+//
+// Server-side encryption is supported by the S3 Multipart Upload operations.
+//
+// Unless you are using a customer-provided encryption key (SSE-C), you don't need
+// to specify the encryption parameters in each UploadPart request. Instead, you
+// only need to specify the server-side encryption parameters in the initial
+// Initiate Multipart request. For more information, see [CreateMultipartUpload].
+//
+// If you request server-side encryption using a customer-provided encryption key
+//
+// (SSE-C) in your initiate multipart upload request, you must provide identical
+// encryption information in each part upload using the following request headers.
+//
+// - x-amz-server-side-encryption-customer-algorithm
+//
+// - x-amz-server-side-encryption-customer-key
+//
+// - x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide.
+//
+// - Directory buckets - For directory buckets, there are only two supported
+// options for server-side encryption: server-side encryption with Amazon S3
+// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+// (SSE-KMS) ( aws:kms ).
+//
+// Special errors
+//
+// - Error Code: NoSuchUpload
+//
+// - Description: The specified multipart upload does not exist. The upload ID
+// might be invalid, or the multipart upload might have been aborted or completed.
+//
+// - HTTP Status Code: 404 Not Found
+//
+// - SOAP Fault Code Prefix: Client
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to UploadPart :
+//
+// [CreateMultipartUpload]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html
+// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html
+// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions
+func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) {
+ if params == nil {
+ params = &UploadPartInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UploadPart", params, optFns, c.addOperationUploadPartMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UploadPartOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
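+
+// A hand-written sketch (not generated) of uploading a single part; createOut
+// is assumed to be the output of a prior CreateMultipartUpload call, partData
+// the bytes for this part, and the "bytes" package is assumed imported:
+//
+//	partOut, err := client.UploadPart(ctx, &s3.UploadPartInput{
+//		Bucket:     aws.String("amzn-s3-demo-bucket"), // hypothetical bucket
+//		Key:        aws.String("large-object"),        // hypothetical key
+//		UploadId:   createOut.UploadId,
+//		PartNumber: aws.Int32(1), // parts are numbered 1 through 10,000
+//		Body:       bytes.NewReader(partData),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// partOut.ETag must be recorded and later passed to CompleteMultipartUpload.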
+
+type UploadPartInput struct {
+
+ // The name of the bucket to which the multipart upload was initiated.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+// Part number of the part being uploaded. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber *int32
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Object data.
+ Body io.Reader
+
+ // Indicates the algorithm used to create the checksum for the object when you use
+ // the SDK. This header will not provide any additional functionality if you don't
+ // use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+// This checksum algorithm must be the same for all parts and it must match the
+ // checksum value supplied in the CreateMultipartUpload request.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumAlgorithm types.ChecksumAlgorithm
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information,
+ // see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // Size of the body in bytes. This parameter is useful when the size of the body
+ // cannot be determined automatically.
+ ContentLength *int64
+
+ // The Base64 encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ //
+ // This functionality is not supported for directory buckets.
+ ContentMD5 *string
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the HTTP
+ // status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header . This must be the same
+ // encryption key specified in the initiate multipart upload request.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
+ p.Key = in.Key
+
+}
+
+type UploadPartOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only
+// present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information,
+ // see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+// present if the checksum was uploaded with the object. When you use the API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity] in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA256 *string
+
+ // Entity tag for the uploaded object.
+ ETag *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256 , aws:kms ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPart{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPart"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addRequestChecksumMetricsTracking(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUploadPartValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPart(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = add100Continue(stack, options); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addUploadPartUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *UploadPartInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UploadPart",
+ }
+}
+
+// getUploadPartRequestAlgorithmMember gets the request checksum algorithm value
+// provided as input.
+func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) {
+ in := input.(*UploadPartInput)
+ if len(in.ChecksumAlgorithm) == 0 {
+ return "", false
+ }
+ return string(in.ChecksumAlgorithm), true
+}
+
+func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
+ return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{
+ GetAlgorithm: getUploadPartRequestAlgorithmMember,
+ RequireChecksum: false,
+ RequestChecksumCalculation: options.RequestChecksumCalculation,
+ EnableTrailingChecksum: true,
+ EnableComputeSHA256PayloadHash: true,
+ EnableDecodedContentLengthHeader: true,
+ })
+}
+
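+// Illustrative sketch (not generated code): the checksum wiring above is
+// driven entirely by the caller's input. Setting ChecksumAlgorithm on
+// UploadPartInput makes getUploadPartRequestAlgorithmMember report that
+// algorithm, and the input checksum middleware then computes and sends the
+// matching trailing checksum. The bucket, key, and upload ID below are
+// placeholder values.
+//
+//	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
+//		Bucket:            aws.String("amzn-s3-demo-bucket"),
+//		Key:               aws.String("example-object"),
+//		PartNumber:        aws.Int32(1),
+//		UploadId:          aws.String("example-upload-id"),
+//		Body:              partBody, // an io.Reader with the part data
+//		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
+//	})
+//	if err == nil {
+//		fmt.Println(aws.ToString(part.ChecksumCRC32)) // Base64 CRC32 echoed by S3
+//	}
+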
+// getUploadPartBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getUploadPartBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
+
+// PresignUploadPart is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &UploadPartInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ clientOptFns = append(clientOptFns, withNoDefaultChecksumAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns,
+ c.client.addOperationUploadPartMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ addUploadPartPayloadAsUnsigned,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
+
+func addUploadPartPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
+ v4.RemoveContentSHA256HeaderMiddleware(stack)
+ v4.RemoveComputePayloadSHA256Middleware(stack)
+ return v4.AddUnsignedPayloadMiddleware(stack)
+}
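+
+// Illustrative sketch (not generated code): producing a presigned UploadPart
+// URL with the PresignUploadPart helper defined above. Because
+// addUploadPartPayloadAsUnsigned marks the payload as unsigned, the holder of
+// the URL can later PUT the actual part bytes. The bucket, key, and upload ID
+// are placeholder values.
+//
+//	presigner := s3.NewPresignClient(client)
+//	req, err := presigner.PresignUploadPart(ctx, &s3.UploadPartInput{
+//		Bucket:     aws.String("amzn-s3-demo-bucket"),
+//		Key:        aws.String("example-object"),
+//		PartNumber: aws.Int32(1),
+//		UploadId:   aws.String("example-upload-id"),
+//	})
+//	if err == nil {
+//		fmt.Println(req.Method, req.URL) // e.g. PUT https://...
+//	}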
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
new file mode 100644
index 000000000..b5be77dff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go
@@ -0,0 +1,661 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Uploads a part by copying data from an existing object as data source. To
+// specify the data source, you add the request header x-amz-copy-source in your
+// request. To specify a byte range, you add the request header
+// x-amz-copy-source-range in your request.
+//
+// For information about maximum and minimum part sizes and other multipart upload
+ // specifications, see [Multipart upload limits] in the Amazon S3 User Guide.
+//
+// Instead of copying data from an existing object as part data, you might use the [UploadPart]
+// action to upload new data as a part of an object in your request.
+//
+// You must initiate a multipart upload before you can upload any part. In
+// response to your initiate request, Amazon S3 returns the upload ID, a unique
+// identifier that you must include in your upload part request.
+//
+// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User
+// Guide. For information about copying objects using a single atomic action vs. a
+ // multipart upload, see [Operations on Objects] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+ // in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
+ // about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
+//
+// Authentication and authorization All UploadPartCopy requests must be
+// authenticated and signed by using IAM credentials (access key ID and secret
+// access key for the IAM identities). All headers with the x-amz- prefix,
+// including x-amz-copy-source , must be signed. For more information, see [REST Authentication].
+//
+// Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the UploadPartCopy API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
+//
+// Amazon Web Services CLI or SDKs handles authentication and authorization on
+// your behalf.
+//
+// Permissions You must have READ access to the source object and WRITE access to
+// the destination bucket.
+//
+// - General purpose bucket permissions - You must have the permissions in a
+// policy based on the bucket types of your source bucket and destination bucket in
+// an UploadPartCopy operation.
+//
+// - If the source object is in a general purpose bucket, you must have the
+// s3:GetObject permission to read the source object that is being copied.
+//
+// - If the destination bucket is a general purpose bucket, you must have the
+// s3:PutObject permission to write the object copy to the destination bucket.
+//
+// - To perform a multipart upload with encryption using an Key Management
+// Service key, the requester must have permission to the kms:Decrypt and
+// kms:GenerateDataKey actions on the key. The requester must also have
+// permissions for the kms:GenerateDataKey action for the CreateMultipartUpload
+// API. Then, the requester needs permissions for the kms:Decrypt action on the
+// UploadPart and UploadPartCopy APIs. These permissions are required because
+// Amazon S3 must decrypt and read data from the encrypted file parts before it
+// completes the multipart upload. For more information about KMS permissions, see [Protecting data using server-side encryption with KMS]
+// in the Amazon S3 User Guide. For information about the permissions required to
+ // use the multipart upload API, see [Multipart upload and permissions] and [Multipart upload API and permissions] in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have permissions in a bucket policy
+// or an IAM identity-based policy based on the source and destination bucket types
+// in an UploadPartCopy operation.
+//
+// - If the source object that you want to copy is in a directory bucket, you
+// must have the s3express:CreateSession permission in the Action element of a
+// policy to read the object. By default, the session is in the ReadWrite mode.
+// If you want to restrict the access, you can explicitly set the
+// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+//
+// - If the copy destination is a directory bucket, you must have the
+// s3express:CreateSession permission in the Action element of a policy to write
+// the object to the destination. The s3express:SessionMode condition key cannot
+// be set to ReadOnly on the copy destination.
+//
+// If the object is encrypted with SSE-KMS, you must also have the
+//
+// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
+// and KMS key policies for the KMS key.
+//
+ // For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 User Guide.
+//
+// Encryption
+//
+// - General purpose buckets - For information about using server-side
+// encryption with customer-provided encryption keys with the UploadPartCopy
+ // operation, see [CopyObject] and [UploadPart].
+//
+// - Directory buckets - For directory buckets, there are only two supported
+// options for server-side encryption: server-side encryption with Amazon S3
+// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys
+ // (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide.
+//
+// For directory buckets, when you perform a CreateMultipartUpload operation and an
+//
+// UploadPartCopy operation, the request headers you provide in the
+// CreateMultipartUpload request must match the default encryption configuration
+// of the destination bucket.
+//
+ // S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from
+//
+// general purpose buckets to directory buckets, from directory buckets to general
+// purpose buckets, or between directory buckets, through [UploadPartCopy]. In this case, Amazon
+// S3 makes a call to KMS every time a copy request is made for a KMS-encrypted
+// object.
+//
+// Special errors
+//
+// - Error Code: NoSuchUpload
+//
+// - Description: The specified multipart upload does not exist. The upload ID
+// might be invalid, or the multipart upload might have been aborted or completed.
+//
+// - HTTP Status Code: 404 Not Found
+//
+// - Error Code: InvalidRequest
+//
+// - Description: The specified copy source is not supported as a byte-range
+// copy source.
+//
+// - HTTP Status Code: 400 Bad Request
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
+//
+// The following operations are related to UploadPartCopy :
+//
+// [CreateMultipartUpload]
+//
+// [UploadPart]
+//
+// [CompleteMultipartUpload]
+//
+// [AbortMultipartUpload]
+//
+// [ListParts]
+//
+// [ListMultipartUploads]
+//
+// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
+// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
+// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html
+// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html
+// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
+// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions
+// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html
+// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
+// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html
+// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
+// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
+// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
+// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html
+// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
+// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
+// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
+//
+// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) {
+ if params == nil {
+ params = &UploadPartCopyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UploadPartCopy", params, optFns, c.addOperationUploadPartCopyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UploadPartCopyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
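+// Illustrative sketch (not generated code): copying the first 5 MiB of an
+// existing object as part 2 of a multipart upload. CopySource is the
+// URL-encoded "source-bucket/source-key" pair; CopySourceRange is allowed here
+// because the source object is assumed to be larger than 5 MB. All names and
+// IDs are placeholder values.
+//
+//	out, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
+//		Bucket:          aws.String("amzn-s3-demo-bucket"),
+//		Key:             aws.String("destination-object"),
+//		UploadId:        aws.String("example-upload-id"),
+//		PartNumber:      aws.Int32(2),
+//		CopySource:      aws.String("amzn-s3-demo-source-bucket/reports%2Fjanuary.pdf"),
+//		CopySourceRange: aws.String("bytes=0-5242879"),
+//	})
+//	// On success, out.CopyPartResult.ETag is the part entry for
+//	// CompleteMultipartUpload.
+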
+type UploadPartCopyInput struct {
+
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket, you
+ // must use virtual-hosted-style requests in the format
+ // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
+ // are not supported. Directory bucket names must be unique in the chosen Zone
+ // (Availability Zone or Local Zone). Bucket names must follow the format
+ // bucket-base-name--zone-id--x-s3 (for example,
+ // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
+ // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide.
+ //
+ // Copying objects across different Amazon Web Services Regions isn't supported
+ // when the source or destination bucket is in Amazon Web Services Local Zones. The
+ // source and destination buckets must have the same parent Amazon Web Services
+ // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code
+ // InvalidRequest .
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the access
+ // point ARN. When using the access point ARN, you must direct requests to the
+ // access point hostname. The access point hostname takes the form
+ // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
+ // action with an access point through the Amazon Web Services SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see [Using access points]in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with S3 on Outposts, you must direct
+ // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the
+ // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When
+ // you use this action with S3 on Outposts, the destination bucket must be the
+ // Outposts access point ARN or the access point alias. For more information about
+ // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide.
+ //
+ // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
+ // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
+ // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the source object for the copy operation. You specify the value in
+ // one of two formats, depending on whether you want to access the source object
+ // through an [access point]:
+ //
+ // - For objects not accessed through an access point, specify the name of the
+ // source bucket and key of the source object, separated by a slash (/). For
+ // example, to copy the object reports/january.pdf from the bucket
+ // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must
+ // be URL-encoded.
+ //
+ // - For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the format
+ // arn:aws:s3:::accesspoint//object/ . For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
+ // . The value must be URL-encoded.
+ //
+ // - Amazon S3 supports copy operations using Access points only when the source
+ // and destination buckets are in the same Amazon Web Services Region.
+ //
+ // - Access points are not supported by directory buckets.
+ //
+ // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
+ // ARN of the object as accessed in the format
+ // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object
+ // reports/january.pdf through outpost my-outpost owned by account 123456789012
+ // in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
+ // . The value must be URL-encoded.
+ //
+ // If your bucket has versioning enabled, you could have multiple versions of the
+ // same object. By default, x-amz-copy-source identifies the current version of
+ // the source object to copy. To copy a specific version of the source object,
+ // append ?versionId= to the x-amz-copy-source request header (for example,
+ // x-amz-copy-source:
+ // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
+ // ).
+ //
+ // If the current version is a delete marker and you don't specify a versionId in
+ // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error,
+ // because the object does not exist. If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an
+ // HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source .
+ //
+ // Directory buckets - S3 Versioning isn't enabled and supported for directory
+ // buckets.
+ //
+ // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
+ //
+ // This member is required.
+ CopySource *string
+
+ // Object key for which the multipart upload was initiated.
+ //
+ // This member is required.
+ Key *string
+
+ // Part number of part being copied. This is a positive integer between 1 and
+ // 10,000.
+ //
+ // This member is required.
+ PartNumber *int32
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ //
+ // This member is required.
+ UploadId *string
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both of the x-amz-copy-source-if-match and
+ // x-amz-copy-source-if-unmodified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false,
+ //
+ // then Amazon S3 returns 200 OK and copies the data.
+ CopySourceIfMatch *string
+
+ // Copies the object if it has been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true,
+ //
+ // then Amazon S3 returns the 412 Precondition Failed response code.
+ CopySourceIfModifiedSince *time.Time
+
+ // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ //
+ // If both of the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true,
+ //
+ // then Amazon S3 returns the 412 Precondition Failed response code.
+ CopySourceIfNoneMatch *string
+
+ // Copies the object if it hasn't been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-match and
+ // x-amz-copy-source-if-unmodified-since headers are present in the request as
+ // follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false,
+ //
+ // then Amazon S3 returns 200 OK and copies the data.
+ CopySourceIfUnmodifiedSince *time.Time
+
+ // The range of bytes to copy from the source object. The range value must use the
+ // form bytes=first-last, where the first and last are the zero-based byte offsets
+ // to copy. For example, bytes=0-9 indicates that you want to copy the first 10
+ // bytes of the source. You can copy a range only if the source object is greater
+ // than 5 MB.
+ CopySourceRange *string
+
+ // Specifies the algorithm to use when decrypting the source object (for example,
+ // AES256 ).
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceSSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one that
+ // was used when the source object was created.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceSSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceSSECustomerKeyMD5 *string
+
+ // The account ID of the expected destination bucket owner. If the account ID that
+ // you provide does not match the actual owner of the destination bucket, the
+ // request fails with the HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string
+
+ // The account ID of the expected source bucket owner. If the account ID that you
+ // provide does not match the actual owner of the source bucket, the request fails
+ // with the HTTP status code 403 Forbidden (access denied).
+ ExpectedSourceBucketOwner *string
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either the
+ // source or destination S3 bucket has Requester Pays enabled, the requester will
+ // pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer types.RequestPayer
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ SSECustomerAlgorithm *string
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in
+ // encrypting data. This value is used to store the object and then it is
+ // discarded; Amazon S3 does not store the encryption key. The key must be
+ // appropriate for use with the algorithm specified in the
+ // x-amz-server-side-encryption-customer-algorithm header. This must be the same
+ // encryption key specified in the initiate multipart upload request.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ SSECustomerKey *string
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ SSECustomerKeyMD5 *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.Bucket = in.Bucket
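+ // UploadPartCopy must be authenticated with IAM credentials rather than
+ // S3 Express (CreateSession) session credentials, as noted in the
+ // operation documentation above, so session auth is disabled for endpoint
+ // resolution.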
+ p.DisableS3ExpressSessionAuth = ptr.Bool(true)
+}
+
+type UploadPartCopyOutput struct {
+
+ // Indicates whether the multipart upload uses an S3 Bucket Key for server-side
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // Container for all response elements.
+ CopyPartResult *types.CopyPartResult
+
+ // The version of the source object that was copied, if you have enabled
+ // versioning on the source bucket.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ CopySourceVersionId *string
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to confirm the encryption
+ // algorithm that's used.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string
+
+ // If server-side encryption with a customer-provided encryption key was
+ // requested, the response will include this header to provide the round-trip
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerKeyMD5 *string
+
+ // If present, indicates the ID of the KMS key that was used for object encryption.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when you store this object in Amazon
+ // S3 (for example, AES256 , aws:kms ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpUploadPartCopy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPartCopy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUploadPartCopy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (v *UploadPartCopyInput) bucket() (string, bool) {
+ if v.Bucket == nil {
+ return "", false
+ }
+ return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UploadPartCopy",
+ }
+}
+
+// getUploadPartCopyBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled
+// bucket name.
+func getUploadPartCopyBucketMember(input interface{}) (*string, bool) {
+ in := input.(*UploadPartCopyInput)
+ if in.Bucket == nil {
+ return nil, false
+ }
+ return in.Bucket, true
+}
+func addUploadPartCopyUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: getUploadPartCopyBucketMember,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: false,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
new file mode 100644
index 000000000..9a9bbcede
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go
@@ -0,0 +1,540 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strings"
+ "time"
+)
+
+// This operation is not supported for directory buckets.
+//
+// Passes transformed objects to a GetObject operation when using Object Lambda
+ // access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points] in the
+// Amazon S3 User Guide.
+//
+// This operation supports metadata that can be returned by [GetObject], in addition to
+// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The
+// GetObject response metadata is supported so that the WriteGetObjectResponse
+ // caller, typically a Lambda function, can provide the same metadata when it
+// internally invokes GetObject . When WriteGetObjectResponse is called by a
+// customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return.
+//
+// You can include any number of metadata headers. When including a metadata
+// header, it should be prefaced with x-amz-meta . For example,
+// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to
+// forward GetObject metadata.
+//
+// Amazon Web Services provides some prebuilt Lambda functions that you can use
+// with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in the
+// Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point.
+//
+// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically detects personally
+// identifiable information (PII) such as names, addresses, dates, credit card
+// numbers, and social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically redacts personally
+// identifiable information (PII) such as names, addresses, dates, credit card
+// numbers, and social security numbers from documents in your Amazon S3 bucket.
+//
+ // Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is
+ // equipped to decompress objects stored in S3 in one of six compressed file
+ // formats, including bzip2, gzip, snappy, zlib, zstandard, and ZIP.
+//
+// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3
+// User Guide.
+//
+// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html
+// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html
+// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
+func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) {
+ if params == nil {
+ params = &WriteGetObjectResponseInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "WriteGetObjectResponse", params, optFns, c.addOperationWriteGetObjectResponseMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*WriteGetObjectResponseOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
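+// Illustrative sketch (not generated code): an S3 Object Lambda function
+// typically extracts the route and token from its invocation event, transforms
+// the requested data, and returns the result through WriteGetObjectResponse.
+// The event.GetObjectContext.OutputRoute/OutputToken accessors and the
+// transformed byte slice below are assumptions standing in for real event
+// plumbing.
+//
+//	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
+//		RequestRoute: aws.String(event.GetObjectContext.OutputRoute),
+//		RequestToken: aws.String(event.GetObjectContext.OutputToken),
+//		StatusCode:   aws.Int32(200),
+//		Body:         bytes.NewReader(transformed),
+//	})
+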
+type WriteGetObjectResponseInput struct {
+
+ // Route prefix to the HTTP URL generated.
+ //
+ // This member is required.
+ RequestRoute *string
+
+ // A single-use encrypted token that maps WriteGetObjectResponse to the end user
+ // GetObject request.
+ //
+ // This member is required.
+ RequestToken *string
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string
+
+ // The object data.
+ Body io.Reader
+
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled *bool
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the Base64
+ // encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda
+ // function. This may not match the checksum for the object stored in Amazon S3.
+ // Amazon S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the Base64
+ // encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda
+ // function. This may not match the checksum for the object stored in Amazon S3.
+ // Amazon S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information,
+ // see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the Base64
+ // encoded, 160-bit SHA1 digest of the object returned by the Object Lambda
+ // function. This may not match the checksum for the object stored in Amazon S3.
+ // Amazon S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the Base64
+ // encoded, 256-bit SHA256 digest of the object returned by the Object Lambda
+ // function. This may not match the checksum for the object stored in Amazon S3.
+ // Amazon S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // Only one checksum header can be specified at a time. If you supply multiple
+ // checksum headers, this request will fail.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string
+
+ // Specifies what content encodings have been applied to the object and thus what
+ // decoding mechanisms must be applied to obtain the media-type referenced by the
+ // Content-Type header field.
+ ContentEncoding *string
+
+ // The language the content is in.
+ ContentLanguage *string
+
+ // The size of the content body in bytes.
+ ContentLength *int64
+
+ // The portion of the object returned in the response.
+ ContentRange *string
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string
+
+ // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false )
+ // a delete marker. To learn more about delete markers, see [Working with delete markers].
+ //
+ // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html
+ DeleteMarker *bool
+
+ // An opaque identifier assigned by a web server to a specific version of a
+ // resource found at a URL.
+ ETag *string
+
+ // A string that uniquely identifies an error condition. Returned in the Code
+ // tag of the error XML response for a corresponding GetObject call. Cannot be
+ // used with a successful StatusCode header or when the transformed object is
+ // provided in the body. All error codes from S3 are sentence-cased. The regular
+ // expression (regex) value is "^[A-Z][a-zA-Z]+$".
+ ErrorCode *string
+
+ // Contains a generic description of the error condition. Returned in the
+ // Message tag of the error XML response for a corresponding GetObject call.
+ // Cannot be used with a successful StatusCode header or when the transformed
+ // object is provided in the body.
+ ErrorMessage *string
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // that provide the object expiration information. The value of the rule-id is
+ // URL-encoded.
+ Expiration *string
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time
+
+ // The date and time that the object was last modified.
+ LastModified *time.Time
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]string
+
+ // Set to the number of metadata entries not returned in x-amz-meta headers. This
+ // can happen if you create metadata using an API like SOAP that supports more
+ // flexible metadata than the REST API. For example, using SOAP, you can create
+ // metadata whose values are not legal HTTP headers.
+ MissingMeta *int32
+
+ // Indicates whether an object stored in Amazon S3 has an active legal hold.
+ ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
+
+ // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For
+ // more information about S3 Object Lock, see [Object Lock].
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html
+ ObjectLockMode types.ObjectLockMode
+
+ // The date and time when Object Lock is configured to expire.
+ ObjectLockRetainUntilDate *time.Time
+
+ // The count of parts this object has.
+ PartsCount *int32
+
+ // Indicates if the request involves a bucket that is either a source or a
+ // destination in a replication rule. For more information about S3
+ // Replication, see [Replication].
+ //
+ // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html
+ ReplicationStatus types.ReplicationStatus
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged types.RequestCharged
+
+ // Provides information about object restoration operation and expiration time of
+ // the restored object copy.
+ Restore *string
+
+ // Encryption algorithm used if server-side encryption with a customer-provided
+ // encryption key was specified for object stored in Amazon S3.
+ SSECustomerAlgorithm *string
+
+ // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to
+ // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)].
+ //
+ // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
+ SSECustomerKeyMD5 *string
+
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
+ // Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ // customer managed key that was used for the object stored in Amazon S3.
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm used when storing the requested object
+ // in Amazon S3 (for example, AES256, aws:kms ).
+ ServerSideEncryption types.ServerSideEncryption
+
+ // The integer status code for an HTTP response of a corresponding GetObject
+ // request. The following is a list of status codes.
+ //
+ // - 200 - OK
+ //
+ // - 206 - Partial Content
+ //
+ // - 304 - Not Modified
+ //
+ // - 400 - Bad Request
+ //
+ // - 401 - Unauthorized
+ //
+ // - 403 - Forbidden
+ //
+ // - 404 - Not Found
+ //
+ // - 405 - Method Not Allowed
+ //
+ // - 409 - Conflict
+ //
+ // - 411 - Length Required
+ //
+ // - 412 - Precondition Failed
+ //
+ // - 416 - Range Not Satisfiable
+ //
+ // - 500 - Internal Server Error
+ //
+ // - 503 - Service Unavailable
+ StatusCode *int32
+
+ // Provides storage class information of the object. Amazon S3 returns this header
+ // for all objects except for S3 Standard storage class objects.
+ //
+ // For more information, see [Storage Classes].
+ //
+ // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+ StorageClass types.StorageClass
+
+ // The number of tags, if any, on the object.
+ TagCount *int32
+
+ // An ID used to reference a specific version of the object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *WriteGetObjectResponseInput) bindEndpointParams(p *EndpointParameters) {
+
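+ // WriteGetObjectResponse is always routed to the S3 Object Lambda
+ // endpoint, so endpoint resolution is steered there unconditionally.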
+ p.UseObjectLambdaEndpoint = ptr.Bool(true)
+}
+
+type WriteGetObjectResponseOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestxml_deserializeOpWriteGetObjectResponse{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "WriteGetObjectResponse"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addUnsignedPayload(stack); err != nil {
+ return err
+ }
+ if err = addContentSHA256Header(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addPutBucketContextMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpWriteGetObjectResponseValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opWriteGetObjectResponse(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addMetadataRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = v4.UseDynamicPayloadSigningMiddleware(stack); err != nil {
+ return err
+ }
+ if err = disableAcceptEncodingGzip(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+type endpointPrefix_opWriteGetObjectResponseMiddleware struct {
+}
+
+func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string {
+ return "EndpointHostPrefix"
+}
+
+func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ opaqueInput := getOperationInput(ctx)
+ input, ok := opaqueInput.(*WriteGetObjectResponseInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input type %T", opaqueInput)
+ }
+
+ var prefix strings.Builder
+ if input.RequestRoute == nil {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so may not be nil")}
+ } else if !smithyhttp.ValidHostLabel(*input.RequestRoute) {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("RequestRoute forms part of the endpoint host and so must match \"[a-zA-Z0-9-]{1,63}\", but was \"%s\"", *input.RequestRoute)}
+ } else {
+ prefix.WriteString(*input.RequestRoute)
+ }
+ prefix.WriteString(".")
+ req.URL.Host = prefix.String() + req.URL.Host
+
+ return next.HandleFinalize(ctx, in)
+}
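+
+// Worked example (illustrative, not generated): given RequestRoute "io-use1-001"
+// and a resolved endpoint host of "s3-object-lambda.us-east-1.amazonaws.com", the
+// finalize step above rewrites the request host to
+// "io-use1-001.s3-object-lambda.us-east-1.amazonaws.com".
+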
+func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, "ResolveEndpointV2", middleware.After)
+}
+
+func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "WriteGetObjectResponse",
+ }
+}
+
+func addWriteGetObjectResponseUpdateEndpoint(stack *middleware.Stack, options Options) error {
+ return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+ Accessor: s3cust.UpdateEndpointParameterAccessor{
+ GetBucketFromInput: nopGetBucketAccessor,
+ },
+ UsePathStyle: options.UsePathStyle,
+ UseAccelerate: options.UseAccelerate,
+ SupportsAccelerate: true,
+ TargetS3ObjectLambda: true,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointOptions,
+ UseARNRegion: options.UseARNRegion,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ })
+}
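+
+// writeGetObjectResponseExample is a minimal usage sketch, not generated code:
+// inside an S3 Object Lambda handler, the RequestRoute and RequestToken taken
+// from the triggering event are echoed back so S3 can route the transformed
+// object to the caller blocked on the original GetObject. The parameter names
+// here are assumptions for illustration only.
+func writeGetObjectResponseExample(ctx context.Context, c *Client, route, token string, body io.Reader) error {
+	_, err := c.WriteGetObjectResponse(ctx, &WriteGetObjectResponseInput{
+		RequestRoute: &route, // becomes the endpoint host prefix (see middleware above)
+		RequestToken: &token, // opaque token identifying the pending GetObject request
+		Body:         body,   // the transformed object payload
+	})
+	return err
+}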
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go
new file mode 100644
index 000000000..6faedcc0b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go
@@ -0,0 +1,347 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) {
+ params.endpointParams = bindEndpointParams(ctx, input, options)
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
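+
+// Note (illustrative, not generated): because the anonymous option is appended
+// after the modeled schemes, scheme selection below only falls through to it
+// when no earlier scheme has a usable identity resolver — e.g. a client built
+// without credentials still resolves SigV4 first but ends up sending unsigned
+// requests.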
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The endpoint resolver parameters for this operation. This service's default
+ // resolver delegates to endpoint rules.
+ endpointParams *EndpointParameters
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthEndpointParams(ctx, params, input, options)
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "WriteGetObjectResponse": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "s3")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ smithyhttp.SetIsUnsignedPayload(&props, true)
+ return props
+ }(),
+ },
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "s3")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+
+ {
+ SchemeID: smithyauth.SchemeIDSigV4A,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4ASigningName(&props, "s3")
+ smithyhttp.SetSigV4ASigningRegions(&props, []string{params.Region})
+ return props
+ }(),
+ },
+ }
+}
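+
+// sigV4OnlyResolver is an illustrative sketch, not generated code: it shows how
+// a custom AuthSchemeResolver (installed via Options.AuthSchemeResolver) can be
+// layered over the default resolution, here dropping the SigV4A option for
+// environments that cannot perform multi-region signing.
+type sigV4OnlyResolver struct {
+	inner AuthSchemeResolver
+}
+
+func (r *sigV4OnlyResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+	opts, err := r.inner.ResolveAuthSchemes(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	filtered := opts[:0] // filter in place; keep SigV4 and any anonymous fallback
+	for _, o := range opts {
+		if o.SchemeID != smithyauth.SchemeIDSigV4A {
+			filtered = append(filtered, o)
+		}
+	}
+	return filtered, nil
+}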
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ for _, option := range options {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go
new file mode 100644
index 000000000..860af056a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go
@@ -0,0 +1,47 @@
+package s3
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// putBucketContextMiddleware stores the input bucket name (if present) within
+// the request context, which is required for a variety of custom S3 behaviors
+type putBucketContextMiddleware struct{}
+
+func (*putBucketContextMiddleware) ID() string {
+ return "putBucketContext"
+}
+
+func (m *putBucketContextMiddleware) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if bucket, ok := m.bucketFromInput(in.Parameters); ok {
+ ctx = customizations.SetBucket(ctx, bucket)
+ }
+ return next.HandleSerialize(ctx, in)
+}
+
+func (m *putBucketContextMiddleware) bucketFromInput(params interface{}) (string, bool) {
+ v, ok := params.(bucketer)
+ if !ok {
+ return "", false
+ }
+
+ return v.bucket()
+}
+
+func addPutBucketContextMiddleware(stack *middleware.Stack) error {
+ // This is essentially a post-Initialize task - only run it once the input
+ // has received all modifications from that phase. Therefore we add it as
+ // an early Serialize step.
+ //
+	// FUTURE: it would be nice to have explicit phases that only we as SDK
+	// authors can hook into (such as the between-phases hook that this step
+	// really should be).
+ return stack.Serialize.Add(&putBucketContextMiddleware{}, middleware.Before)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go
new file mode 100644
index 000000000..4e7f7e24e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go
@@ -0,0 +1,15 @@
+package s3
+
+// bucketer is implemented by all S3 input structures.
+type bucketer interface {
+ bucket() (string, bool)
+}
+
+func bucketFromInput(params interface{}) (string, bool) {
+ v, ok := params.(bucketer)
+ if !ok {
+ return "", false
+ }
+
+ return v.bucket()
+}
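+
+// Illustrative sketch, not generated here: each operation input satisfies
+// bucketer through an unexported accessor of roughly this shape, returning
+// false when the operation carries no bucket parameter:
+//
+//	func (v *GetObjectInput) bucket() (string, bool) {
+//		if v.Bucket == nil {
+//			return "", false
+//		}
+//		return *v.Bucket, true
+//	}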
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go
new file mode 100644
index 000000000..5803b9e45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+ "context"
+ "fmt"
+
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// backfills the checksum algorithm onto the context for CreateMultipartUpload so
+// the transfer manager can set a checksum header on the request accordingly for
+// S3 Express requests
+type setCreateMPUChecksumAlgorithm struct{}
+
+func (*setCreateMPUChecksumAlgorithm) ID() string {
+ return "setCreateMPUChecksumAlgorithm"
+}
+
+func (*setCreateMPUChecksumAlgorithm) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected input type %T", in.Parameters)
+ }
+
+ ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(input.ChecksumAlgorithm))
+ return next.HandleSerialize(ctx, in)
+}
+
+func addSetCreateMPUChecksumAlgorithm(s *middleware.Stack) error {
+ return s.Serialize.Add(&setCreateMPUChecksumAlgorithm{}, middleware.Before)
+}
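+
+// checksumAlgorithmFromContext is an illustrative sketch, not generated code:
+// downstream components (e.g. the transfer manager's S3 Express handling) read
+// the backfilled value through the matching getter in the internal context
+// package; the getter name is assumed to mirror the setter used above.
+func checksumAlgorithmFromContext(ctx context.Context) string {
+	return internalcontext.GetChecksumInputAlgorithm(ctx)
+}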
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
new file mode 100644
index 000000000..f1e5ac029
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go
@@ -0,0 +1,24194 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi"
+ awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "time"
+)
+
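+// deserializeS3Expires deliberately discards the parse error: Expires values
+// seen in the wild are frequently not valid HTTP dates, so an unparsable header
+// yields a nil Expires instead of failing the whole response (where modeled, the
+// raw header remains available on the output's ExpiresString field).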
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
+type awsRestxml_deserializeOpAbortMultipartUpload struct {
+}
+
+func (*awsRestxml_deserializeOpAbortMultipartUpload) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorAbortMultipartUpload(response, &metadata)
+ }
+ output := &AbortMultipartUploadOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorAbortMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchUpload", errorCode):
+ return awsRestxml_deserializeErrorNoSuchUpload(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
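+
+// Note (illustrative, not generated): the modeled error surfaced above can be
+// matched by callers with errors.As, e.g.
+//
+//	var nsu *types.NoSuchUpload
+//	if errors.As(err, &nsu) {
+//		// the upload was already aborted or completed
+//	}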
+
+func awsRestxml_deserializeOpHttpBindingsAbortMultipartUploadOutput(v *AbortMultipartUploadOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpCompleteMultipartUpload struct {
+}
+
+func (*awsRestxml_deserializeOpCompleteMultipartUpload) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCompleteMultipartUpload(response, &metadata)
+ }
+ output := &CompleteMultipartUploadOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
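+
+// Note (illustrative, not generated): the 1 KiB ring buffer above tees the most
+// recently read body bytes so a DeserializationError can carry a Snapshot of the
+// malformed XML for debugging, without buffering the entire response body.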
+
+func awsRestxml_deserializeOpErrorCompleteMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *CompleteMultipartUploadOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteMultipartUploadOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CompleteMultipartUploadOutput
+ if *v == nil {
+ sv = &CompleteMultipartUploadOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32C = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC64NVME = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA1 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA256 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Location", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Location = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpCopyObject struct {
+}
+
+func (*awsRestxml_deserializeOpCopyObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCopyObject(response, &metadata)
+ }
+ output := &CopyObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentCopyObjectResult(&output.CopyObjectResult, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCopyObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ObjectNotInActiveTierError", errorCode):
+ return awsRestxml_deserializeErrorObjectNotInActiveTierError(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.CopySourceVersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCopyObjectOutput(v **CopyObjectOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CopyObjectOutput
+ if *v == nil {
+ sv = &CopyObjectOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CopyObjectResult", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCopyObjectResult(&sv.CopyObjectResult, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpCreateBucket struct {
+}
+
+func (*awsRestxml_deserializeOpCreateBucket) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCreateBucket(response, &metadata)
+ }
+ output := &CreateBucketOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCreateBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("BucketAlreadyExists", errorCode):
+ return awsRestxml_deserializeErrorBucketAlreadyExists(response, errorBody)
+
+ case strings.EqualFold("BucketAlreadyOwnedByYou", errorCode):
+ return awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("Location"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Location = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response, &metadata)
+ }
+ output := &CreateBucketMetadataTableConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpCreateMultipartUpload struct {
+}
+
+func (*awsRestxml_deserializeOpCreateMultipartUpload) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCreateMultipartUpload(response, &metadata)
+ }
+ output := &CreateMultipartUploadOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCreateMultipartUpload(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMultipartUploadOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.AbortDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AbortRuleId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumType = types.ChecksumType(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMultipartUploadOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CreateMultipartUploadOutput
+ if *v == nil {
+ sv = &CreateMultipartUploadOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("UploadId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpCreateSession struct {
+}
+
+func (*awsRestxml_deserializeOpCreateSession) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorCreateSession(response, &metadata)
+ }
+ output := &CreateSessionOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentCreateSessionOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchBucket", errorCode):
+ return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(v *CreateSessionOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *CreateSessionOutput
+ if *v == nil {
+ sv = &CreateSessionOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSessionCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpDeleteBucket struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucket) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucket(response, &metadata)
+ }
+ output := &DeleteBucketOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response, &metadata)
+ }
+ output := &DeleteBucketAnalyticsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketCors struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketCors) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketCors(response, &metadata)
+ }
+ output := &DeleteBucketCorsOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketEncryption struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketEncryption) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketEncryption(response, &metadata)
+ }
+ output := &DeleteBucketEncryptionOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response, &metadata)
+ }
+ output := &DeleteBucketIntelligentTieringConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response, &metadata)
+ }
+ output := &DeleteBucketInventoryConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketLifecycle struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketLifecycle) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response, &metadata)
+ }
+ output := &DeleteBucketLifecycleOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response, &metadata)
+ }
+ output := &DeleteBucketMetadataTableConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response, &metadata)
+ }
+ output := &DeleteBucketMetricsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketOwnershipControls) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response, &metadata)
+ }
+ output := &DeleteBucketOwnershipControlsOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketPolicy struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketPolicy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketPolicy(response, &metadata)
+ }
+ output := &DeleteBucketPolicyOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketReplication struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketReplication) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketReplication(response, &metadata)
+ }
+ output := &DeleteBucketReplicationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketTagging struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketTagging(response, &metadata)
+ }
+ output := &DeleteBucketTaggingOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteBucketWebsite struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteBucketWebsite) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketWebsite(response, &metadata)
+ }
+ output := &DeleteBucketWebsiteOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpDeleteObject struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteObject(response, &metadata)
+ }
+ output := &DeleteObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.DeleteMarker = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpDeleteObjects struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteObjects) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteObjects(response, &metadata)
+ }
+ output := &DeleteObjectsOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentDeleteObjectsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsDeleteObjectsOutput(v *DeleteObjectsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentDeleteObjectsOutput(v **DeleteObjectsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *DeleteObjectsOutput
+ if *v == nil {
+ sv = &DeleteObjectsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Deleted", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(&sv.Deleted, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Error", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentErrorsUnwrapped(&sv.Errors, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpDeleteObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpDeleteObjectTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeleteObjectTagging(response, &metadata)
+ }
+ output := &DeleteObjectTaggingOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeleteObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsDeleteObjectTaggingOutput(v *DeleteObjectTaggingOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpDeletePublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpDeletePublicAccessBlock) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response, &metadata)
+ }
+ output := &DeletePublicAccessBlockOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorDeletePublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpGetBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketAccelerateConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response, &metadata)
+ }
+ output := &GetBucketAccelerateConfigurationOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(v *GetBucketAccelerateConfigurationOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketAccelerateConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketAccelerateConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.BucketAccelerateStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketAcl struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketAcl(response, &metadata)
+ }
+ output := &GetBucketAclOutput{}
+ out.Result = output
+
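+ // The 1 KiB ring buffer tees off the most recent bytes of the body so that a
+ // decode failure can attach a snapshot of the offending XML to the
+ // smithy.DeserializationError.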
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketAclOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketAclOutput(v **GetBucketAclOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketAclOutput
+ if *v == nil {
+ sv = &GetBucketAclOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessControlList", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response, &metadata)
+ }
+ output := &GetBucketAnalyticsConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentAnalyticsConfiguration(&output.AnalyticsConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketAnalyticsConfigurationOutput(v **GetBucketAnalyticsConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketAnalyticsConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketAnalyticsConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AnalyticsConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&sv.AnalyticsConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketCors struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketCors) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketCors(response, &metadata)
+ }
+ output := &GetBucketCorsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketCorsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
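+// S3 returns CORS rules as repeated top-level <CORSRule> elements (a flattened
+// list), so the "Unwrapped" variant of the list deserializer is used here.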
+func awsRestxml_deserializeOpDocumentGetBucketCorsOutput(v **GetBucketCorsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketCorsOutput
+ if *v == nil {
+ sv = &GetBucketCorsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CORSRule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCORSRulesUnwrapped(&sv.CORSRules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketEncryption struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketEncryption) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketEncryption(response, &metadata)
+ }
+ output := &GetBucketEncryptionOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&output.ServerSideEncryptionConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketEncryptionOutput(v **GetBucketEncryptionOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketEncryptionOutput
+ if *v == nil {
+ sv = &GetBucketEncryptionOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ServerSideEncryptionConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(&sv.ServerSideEncryptionConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response, &metadata)
+ }
+ output := &GetBucketIntelligentTieringConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&output.IntelligentTieringConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketIntelligentTieringConfigurationOutput(v **GetBucketIntelligentTieringConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketIntelligentTieringConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketIntelligentTieringConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&sv.IntelligentTieringConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketInventoryConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response, &metadata)
+ }
+ output := &GetBucketInventoryConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentInventoryConfiguration(&output.InventoryConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketInventoryConfigurationOutput(v **GetBucketInventoryConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketInventoryConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketInventoryConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("InventoryConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryConfiguration(&sv.InventoryConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
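+// GetBucketLifecycleConfiguration mixes both binding styles: the
+// x-amz-transition-default-minimum-object-size header is applied first, then the
+// XML body of <Rule> elements is decoded into the same output struct.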
+type awsRestxml_deserializeOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLifecycleConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response, &metadata)
+ }
+ output := &GetBucketLifecycleConfigurationOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(v *GetBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketLifecycleConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketLifecycleConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLocation struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLocation) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketLocation(response, &metadata)
+ }
+ output := &GetBucketLocationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketLocationOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketLocation(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
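+// Callers should note that S3 historically reports an empty <LocationConstraint>
+// for buckets in us-east-1, so a zero-valued LocationConstraint means that region
+// rather than missing data.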
+func awsRestxml_deserializeOpDocumentGetBucketLocationOutput(v **GetBucketLocationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketLocationOutput
+ if *v == nil {
+ sv = &GetBucketLocationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("LocationConstraint", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.LocationConstraint = types.BucketLocationConstraint(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketLogging struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketLogging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketLogging(response, &metadata)
+ }
+ output := &GetBucketLoggingOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLoggingOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketLoggingOutput
+ if *v == nil {
+ sv = &GetBucketLoggingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("LoggingEnabled", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLoggingEnabled(&sv.LoggingEnabled, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response, &metadata)
+ }
+ output := &GetBucketMetadataTableConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&output.GetBucketMetadataTableConfigurationResult, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketMetadataTableConfigurationOutput(v **GetBucketMetadataTableConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketMetadataTableConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketMetadataTableConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("GetBucketMetadataTableConfigurationResult", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&sv.GetBucketMetadataTableConfigurationResult, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata)
+ }
+ output := &GetBucketMetricsConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketMetricsConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketMetricsConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("MetricsConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata)
+ }
+ output := &GetBucketNotificationConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
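+// The notification document still uses the legacy <CloudFunctionConfiguration>
+// tag on the wire; it is mapped onto LambdaFunctionConfigurations here, next to
+// the flattened Queue and Topic configuration lists.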
+func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketNotificationConfigurationOutput
+ if *v == nil {
+ sv = &GetBucketNotificationConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("EventBridgeConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("QueueConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(&sv.QueueConfigurations, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("TopicConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(&sv.TopicConfigurations, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketOwnershipControls) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response, &metadata)
+ }
+ output := &GetBucketOwnershipControlsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentOwnershipControls(&output.OwnershipControls, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketOwnershipControlsOutput(v **GetBucketOwnershipControlsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketOwnershipControlsOutput
+ if *v == nil {
+ sv = &GetBucketOwnershipControlsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("OwnershipControls", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwnershipControls(&sv.OwnershipControls, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketPolicy struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketPolicy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicy(response, &metadata)
+ }
+ output := &GetBucketPolicyOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(output, response.Body, response.ContentLength)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketPolicyOutput(v *GetBucketPolicyOutput, body io.ReadCloser, contentLength int64) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization of nil %T", v)
+ }
+ var buf bytes.Buffer
+ if contentLength > 0 {
+ buf.Grow(int(contentLength))
+ } else {
+ buf.Grow(512)
+ }
+
+ _, err := buf.ReadFrom(body)
+ if err != nil {
+ return err
+ }
+ if buf.Len() > 0 {
+ v.Policy = ptr.String(buf.String())
+ }
+ return nil
+}
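+
+// GetBucketPolicy is the odd one out among these bucket getters: its payload
+// is the raw policy document (JSON text), so the deserializer above buffers
+// the whole body into the Policy string instead of walking XML. A hedged
+// caller-side sketch (client construction and ctx elided; the bucket name is
+// a placeholder):
+//
+//	out, err := client.GetBucketPolicy(ctx, &s3.GetBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	var doc map[string]any
+//	if out.Policy != nil {
+//		// the policy arrives as JSON text, not XML
+//		if err := json.Unmarshal([]byte(*out.Policy), &doc); err != nil {
+//			return err
+//		}
+//	}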
+
+type awsRestxml_deserializeOpGetBucketPolicyStatus struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketPolicyStatus) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response, &metadata)
+ }
+ output := &GetBucketPolicyStatusOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentPolicyStatus(&output.PolicyStatus, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketPolicyStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketPolicyStatusOutput(v **GetBucketPolicyStatusOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketPolicyStatusOutput
+ if *v == nil {
+ sv = &GetBucketPolicyStatusOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("PolicyStatus", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPolicyStatus(&sv.PolicyStatus, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
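+
+// The document deserializers share one traversal shape: pull tokens until the
+// node is exhausted, re-wrap the decoder around each child element, dispatch
+// on the local tag name, and Skip anything unrecognized so that new fields
+// added by the service do not break older clients. Stripped to its skeleton
+// (the tag name "Known" is illustrative):
+//
+//	for {
+//		t, done, err := decoder.Token()
+//		if err != nil {
+//			return err
+//		}
+//		if done {
+//			break
+//		}
+//		child := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+//		switch {
+//		case strings.EqualFold("Known", t.Name.Local):
+//			_ = child // decode the known member with child here
+//		default:
+//			if err := decoder.Decoder.Skip(); err != nil {
+//				return err
+//			}
+//		}
+//	}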
+
+type awsRestxml_deserializeOpGetBucketReplication struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketReplication) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketReplication(response, &metadata)
+ }
+ output := &GetBucketReplicationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentReplicationConfiguration(&output.ReplicationConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketReplicationOutput(v **GetBucketReplicationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketReplicationOutput
+ if *v == nil {
+ sv = &GetBucketReplicationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ReplicationConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationConfiguration(&sv.ReplicationConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketRequestPayment struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketRequestPayment) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketRequestPayment(response, &metadata)
+ }
+ output := &GetBucketRequestPaymentOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketRequestPaymentOutput(v **GetBucketRequestPaymentOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketRequestPaymentOutput
+ if *v == nil {
+ sv = &GetBucketRequestPaymentOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Payer", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Payer = types.Payer(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketTagging struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketTagging(response, &metadata)
+ }
+ output := &GetBucketTaggingOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketTaggingOutput(v **GetBucketTaggingOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketTaggingOutput
+ if *v == nil {
+ sv = &GetBucketTaggingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TagSet", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetBucketVersioning struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketVersioning) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketVersioning(response, &metadata)
+ }
+ output := &GetBucketVersioningOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketVersioningOutput(v **GetBucketVersioningOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketVersioningOutput
+ if *v == nil {
+ sv = &GetBucketVersioningOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("MfaDelete", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.MFADelete = types.MFADeleteStatus(xtv)
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.BucketVersioningStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
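+
+// Note that enum-typed members such as Status and MFADelete are plain string
+// casts: an unrecognized value is passed through rather than rejected, and a
+// bucket that never had versioning configured yields the zero value. A caller
+// can tell the cases apart like so (a sketch, not exhaustive):
+//
+//	switch out.Status {
+//	case types.BucketVersioningStatusEnabled:
+//		// versioning is on
+//	case types.BucketVersioningStatusSuspended:
+//		// versioning was enabled once, now suspended
+//	default:
+//		// "" means never configured; anything else is a value this
+//		// SDK build does not know about yet
+//	}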
+
+type awsRestxml_deserializeOpGetBucketWebsite struct {
+}
+
+func (*awsRestxml_deserializeOpGetBucketWebsite) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetBucketWebsite(response, &metadata)
+ }
+ output := &GetBucketWebsiteOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetBucketWebsiteOutput(v **GetBucketWebsiteOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetBucketWebsiteOutput
+ if *v == nil {
+ sv = &GetBucketWebsiteOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ErrorDocument", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentErrorDocument(&sv.ErrorDocument, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("IndexDocument", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIndexDocument(&sv.IndexDocument, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("RedirectAllRequestsTo", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRedirectAllRequestsTo(&sv.RedirectAllRequestsTo, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("RoutingRules", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRoutingRules(&sv.RoutingRules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObject struct {
+}
+
+func (*awsRestxml_deserializeOpGetObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObject(response, &metadata)
+ }
+ output := &GetObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ err = awsRestxml_deserializeOpDocumentGetObjectOutput(output, response.Body)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("InvalidObjectState", errorCode):
+ return awsRestxml_deserializeErrorInvalidObjectState(response, errorBody)
+
+ case strings.EqualFold("NoSuchKey", errorCode):
+ return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AcceptRanges = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.CacheControl = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32C = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC64NVME = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA1 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA256 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumType = types.ChecksumType(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentDisposition = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentEncoding = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentLanguage = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 64)
+ if err != nil {
+ return err
+ }
+ v.ContentLength = ptr.Int64(vv)
+ }
+
+ if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentRange = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentType = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.DeleteMarker = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ deserOverride, err := deserializeS3Expires(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.Expires = deserOverride
+
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ExpiresString = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.LastModified = ptr.Time(t)
+ }
+
+ for headerKey, headerValues := range response.Header {
+ if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") {
+ if v.Metadata == nil {
+ v.Metadata = map[string]string{}
+ }
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0]
+ }
+ }
+
+ if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.MissingMeta = ptr.Int32(int32(vv))
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ObjectLockMode = types.ObjectLockMode(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseDateTime(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.ObjectLockRetainUntilDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.PartsCount = ptr.Int32(int32(vv))
+ }
+
+ if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ReplicationStatus = types.ReplicationStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Restore = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.StorageClass = types.StorageClass(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-tagging-count"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.TagCount = ptr.Int32(int32(vv))
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.WebsiteRedirectLocation = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectOutput(v *GetObjectOutput, body io.ReadCloser) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization of nil %T", v)
+ }
+ v.Body = body
+ return nil
+}
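+
+// GetObject is the one operation here whose payload is not decoded at all:
+// the deserializer above hands the HTTP body through as-is, so the caller
+// owns the stream and must close it, while the scalar fields arrive via the
+// header bindings (user metadata keys come back lowercased, minus the
+// x-amz-meta- prefix). A hedged usage sketch (client and ctx elided; bucket
+// and key are placeholders):
+//
+//	out, err := client.GetObject(ctx, &s3.GetObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer out.Body.Close() // the SDK neither buffers nor closes this stream
+//	data, err := io.ReadAll(out.Body)
+//	if err != nil {
+//		return err
+//	}
+//	_ = data
+//	_ = out.Metadata // e.g. out.Metadata["my-key"] for x-amz-meta-My-Key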
+
+type awsRestxml_deserializeOpGetObjectAcl struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectAcl(response, &metadata)
+ }
+ output := &GetObjectAclOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetObjectAclOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchKey", errorCode):
+ return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectAclOutput(v *GetObjectAclOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectAclOutput(v **GetObjectAclOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectAclOutput
+ if *v == nil {
+ sv = &GetObjectAclOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessControlList", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrants(&sv.Grants, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectAttributes struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectAttributes) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectAttributes(response, &metadata)
+ }
+ output := &GetObjectAttributesOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchKey", errorCode):
+ return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectAttributesOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.DeleteMarker = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.LastModified = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v **GetObjectAttributesOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectAttributesOutput
+ if *v == nil {
+ sv = &GetObjectAttributesOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Checksum", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentChecksum(&sv.Checksum, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ObjectParts", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGetObjectAttributesParts(&sv.ObjectParts, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ObjectSize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.ObjectSize = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpGetObjectLegalHold struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectLegalHold) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectLegalHold(response, &metadata)
+ }
+ output := &GetObjectLegalHoldOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentObjectLockLegalHold(&output.LegalHold, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectLegalHoldOutput(v **GetObjectLegalHoldOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectLegalHoldOutput
+ if *v == nil {
+ sv = &GetObjectLegalHoldOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("LegalHold", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockLegalHold(&sv.LegalHold, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
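+// awsRestxml_deserializeOpGetObjectLockConfiguration decodes the XML payload
+// into output.ObjectLockConfiguration.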
+type awsRestxml_deserializeOpGetObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectLockConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response, &metadata)
+ }
+ output := &GetObjectLockConfigurationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentObjectLockConfiguration(&output.ObjectLockConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectLockConfigurationOutput(v **GetObjectLockConfigurationOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectLockConfigurationOutput
+ if *v == nil {
+ sv = &GetObjectLockConfigurationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectLockConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockConfiguration(&sv.ObjectLockConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
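+// awsRestxml_deserializeOpGetObjectRetention decodes the XML payload into
+// output.Retention.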
+type awsRestxml_deserializeOpGetObjectRetention struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectRetention) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectRetention(response, &metadata)
+ }
+ output := &GetObjectRetentionOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentObjectLockRetention(&output.Retention, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectRetentionOutput(v **GetObjectRetentionOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectRetentionOutput
+ if *v == nil {
+ sv = &GetObjectRetentionOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Retention", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockRetention(&sv.Retention, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
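+// awsRestxml_deserializeOpGetObjectTagging binds the x-amz-version-id response
+// header and decodes the TagSet from the XML payload.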
+type awsRestxml_deserializeOpGetObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectTagging(response, &metadata)
+ }
+ output := &GetObjectTaggingOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectTaggingOutput(v *GetObjectTaggingOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectTaggingOutput(v **GetObjectTaggingOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetObjectTaggingOutput
+ if *v == nil {
+ sv = &GetObjectTaggingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TagSet", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSet(&sv.TagSet, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
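+// awsRestxml_deserializeOpGetObjectTorrent binds the x-amz-request-charged
+// header and passes the response body through unread as output.Body, since
+// the torrent payload is a raw byte stream rather than XML.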
+type awsRestxml_deserializeOpGetObjectTorrent struct {
+}
+
+func (*awsRestxml_deserializeOpGetObjectTorrent) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetObjectTorrent(response, &metadata)
+ }
+ output := &GetObjectTorrentOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ err = awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(output, response.Body)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetObjectTorrent(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsGetObjectTorrentOutput(v *GetObjectTorrentOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+func awsRestxml_deserializeOpDocumentGetObjectTorrentOutput(v *GetObjectTorrentOutput, body io.ReadCloser) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization of nil %T", v)
+ }
+ v.Body = body
+ return nil
+}
+
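+// awsRestxml_deserializeOpGetPublicAccessBlock decodes the XML payload into
+// output.PublicAccessBlockConfiguration.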
+type awsRestxml_deserializeOpGetPublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpGetPublicAccessBlock) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorGetPublicAccessBlock(response, &metadata)
+ }
+ output := &GetPublicAccessBlockOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&output.PublicAccessBlockConfiguration, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorGetPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentGetPublicAccessBlockOutput(v **GetPublicAccessBlockOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetPublicAccessBlockOutput
+ if *v == nil {
+ sv = &GetPublicAccessBlockOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("PublicAccessBlockConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(&sv.PublicAccessBlockConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
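+// awsRestxml_deserializeOpHeadBucket has no body to decode; all output fields
+// are bound from response headers, and a NotFound error code is routed to
+// awsRestxml_deserializeErrorNotFound.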
+type awsRestxml_deserializeOpHeadBucket struct {
+}
+
+func (*awsRestxml_deserializeOpHeadBucket) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorHeadBucket(response, &metadata)
+ }
+ output := &HeadBucketOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NotFound", errorCode):
+ return awsRestxml_deserializeErrorNotFound(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(v *HeadBucketOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-access-point-alias"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.AccessPointAlias = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-bucket-location-name"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.BucketLocationName = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-bucket-location-type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.BucketLocationType = types.LocationType(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-bucket-region"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.BucketRegion = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
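+// awsRestxml_deserializeOpHeadObject is header-only: every output field,
+// including checksums, object-lock settings, and x-amz-meta-* user metadata,
+// is parsed from response headers by the HttpBindings helper below.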
+type awsRestxml_deserializeOpHeadObject struct {
+}
+
+func (*awsRestxml_deserializeOpHeadObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorHeadObject(response, &metadata)
+ }
+ output := &HeadObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NotFound", errorCode):
+ return awsRestxml_deserializeErrorNotFound(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("accept-ranges"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AcceptRanges = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-archive-status"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ArchiveStatus = types.ArchiveStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.CacheControl = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32C = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC64NVME = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA1 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA256 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumType = types.ChecksumType(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentDisposition = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Encoding"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentEncoding = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Language"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentLanguage = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Length"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 64)
+ if err != nil {
+ return err
+ }
+ v.ContentLength = ptr.Int64(vv)
+ }
+
+ if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentRange = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ContentType = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-delete-marker"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.DeleteMarker = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ deserOverride, err := deserializeS3Expires(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.Expires = deserOverride
+
+ }
+
+ if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ExpiresString = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.LastModified = ptr.Time(t)
+ }
+
+ for headerKey, headerValues := range response.Header {
+ if lenPrefix := len("x-amz-meta-"); len(headerKey) >= lenPrefix && strings.EqualFold(headerKey[:lenPrefix], "x-amz-meta-") {
+ if v.Metadata == nil {
+ v.Metadata = map[string]string{}
+ }
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Metadata[strings.ToLower(headerKey[lenPrefix:])] = headerValues[0]
+ }
+ }
+
+ if headerValues := response.Header.Values("x-amz-missing-meta"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.MissingMeta = ptr.Int32(int32(vv))
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ObjectLockLegalHoldStatus = types.ObjectLockLegalHoldStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-mode"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ObjectLockMode = types.ObjectLockMode(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-lock-retain-until-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseDateTime(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.ObjectLockRetainUntilDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-mp-parts-count"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 32)
+ if err != nil {
+ return err
+ }
+ v.PartsCount = ptr.Int32(int32(vv))
+ }
+
+ if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ReplicationStatus = types.ReplicationStatus(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-restore"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Restore = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-storage-class"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.StorageClass = types.StorageClass(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-website-redirect-location"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.WebsiteRedirectLocation = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
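+// awsRestxml_deserializeOpListBucketAnalyticsConfigurations decodes the
+// paginated analytics-configuration list (ContinuationToken, IsTruncated,
+// NextContinuationToken) from the XML payload.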
+type awsRestxml_deserializeOpListBucketAnalyticsConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketAnalyticsConfigurations) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response, &metadata)
+ }
+ output := &ListBucketAnalyticsConfigurationsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketAnalyticsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v **ListBucketAnalyticsConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketAnalyticsConfigurationsOutput
+ if *v == nil {
+ sv = &ListBucketAnalyticsConfigurationsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AnalyticsConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(&sv.AnalyticsConfigurationList, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
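+// awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations decodes
+// the paginated intelligent-tiering configuration list from the XML payload.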
+type awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response, &metadata)
+ }
+ output := &ListBucketIntelligentTieringConfigurationsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketIntelligentTieringConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsOutput(v **ListBucketIntelligentTieringConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketIntelligentTieringConfigurationsOutput
+ if *v == nil {
+ sv = &ListBucketIntelligentTieringConfigurationsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IntelligentTieringConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(&sv.IntelligentTieringConfigurationList, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
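+// awsRestxml_deserializeOpListBucketInventoryConfigurations decodes the
+// paginated inventory-configuration list from the XML payload.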
+type awsRestxml_deserializeOpListBucketInventoryConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketInventoryConfigurations) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response, &metadata)
+ }
+ output := &ListBucketInventoryConfigurationsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketInventoryConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v **ListBucketInventoryConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketInventoryConfigurationsOutput
+ if *v == nil {
+ sv = &ListBucketInventoryConfigurationsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("InventoryConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(&sv.InventoryConfigurationList, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
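+// awsRestxml_deserializeOpListBucketMetricsConfigurations decodes the
+// paginated metrics-configuration list from the XML payload.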
+type awsRestxml_deserializeOpListBucketMetricsConfigurations struct {
+}
+
+func (*awsRestxml_deserializeOpListBucketMetricsConfigurations) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response, &metadata)
+ }
+ output := &ListBucketMetricsConfigurationsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBucketMetricsConfigurations(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
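+// Document deserializers match child element names case-insensitively and
+// skip unmodeled tags, keeping the client tolerant of response fields added
+// by the service later.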
+func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **ListBucketMetricsConfigurationsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketMetricsConfigurationsOutput
+ if *v == nil {
+ sv = &ListBucketMetricsConfigurationsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("MetricsConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(&sv.MetricsConfigurationList, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListBuckets struct {
+}
+
+func (*awsRestxml_deserializeOpListBuckets) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListBuckets(response, &metadata)
+ }
+ output := &ListBucketsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListBucketsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListBucketsOutput
+ if *v == nil {
+ sv = &ListBucketsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Buckets", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListDirectoryBuckets struct {
+}
+
+func (*awsRestxml_deserializeOpListDirectoryBuckets) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListDirectoryBuckets(response, &metadata)
+ }
+ output := &ListDirectoryBucketsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListDirectoryBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(v **ListDirectoryBucketsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListDirectoryBucketsOutput
+ if *v == nil {
+ sv = &ListDirectoryBucketsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Buckets", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListMultipartUploads struct {
+}
+
+func (*awsRestxml_deserializeOpListMultipartUploads) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata)
+ }
+ output := &ListMultipartUploadsOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
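+// HTTP-binding deserializers populate output members carried in response
+// headers (here x-amz-request-charged) before the XML payload is decoded.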
+func awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(v *ListMultipartUploadsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListMultipartUploadsOutput
+ if *v == nil {
+ sv = &ListMultipartUploadsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("KeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxUploads", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxUploads = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("NextKeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextKeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextUploadIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextUploadIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("UploadIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Upload", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(&sv.Uploads, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListObjects struct {
+}
+
+func (*awsRestxml_deserializeOpListObjects) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListObjects(response, &metadata)
+ }
+ output := &ListObjectsOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsListObjectsOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListObjectsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
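+	// ListObjects models NoSuchBucket, so that code dispatches to its typed
+	// error deserializer; any other code falls through to a generic API error.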
+ switch {
+ case strings.EqualFold("NoSuchBucket", errorCode):
+ return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsListObjectsOutput(v *ListObjectsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListObjectsOutput
+ if *v == nil {
+ sv = &ListObjectsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Contents", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("Marker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Marker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxKeys", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxKeys = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListObjectsV2 struct {
+}
+
+func (*awsRestxml_deserializeOpListObjectsV2) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListObjectsV2(response, &metadata)
+ }
+ output := &ListObjectsV2Output{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListObjectsV2Output(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchBucket", errorCode):
+ return awsRestxml_deserializeErrorNoSuchBucket(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(v *ListObjectsV2Output, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListObjectsV2Output
+ if *v == nil {
+ sv = &ListObjectsV2Output{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Contents", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectListUnwrapped(&sv.Contents, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("KeyCount", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.KeyCount = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("MaxKeys", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxKeys = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextContinuationToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextContinuationToken = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("StartAfter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StartAfter = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListObjectVersions struct {
+}
+
+func (*awsRestxml_deserializeOpListObjectVersions) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListObjectVersions(response, &metadata)
+ }
+ output := &ListObjectVersionsOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListObjectVersionsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(v *ListObjectVersionsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListObjectVersionsOutput
+ if *v == nil {
+ sv = &ListObjectVersionsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CommonPrefixes", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("DeleteMarker", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(&sv.DeleteMarkers, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Delimiter", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Delimiter = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncodingType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EncodingType = types.EncodingType(xtv)
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("KeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxKeys", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxKeys = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextKeyMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextKeyMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NextVersionIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextVersionIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("VersionIdMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionIdMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Version", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectVersionListUnwrapped(&sv.Versions, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpListParts struct {
+}
+
+func (*awsRestxml_deserializeOpListParts) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorListParts(response, &metadata)
+ }
+ output := &ListPartsOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsListPartsOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeOpDocumentListPartsOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorListParts(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsListPartsOutput(v *ListPartsOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
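+	// ListParts additionally binds x-amz-abort-date (parsed as an HTTP date)
+	// and x-amz-abort-rule-id from the response headers.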
+ if headerValues := response.Header.Values("x-amz-abort-date"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ t, err := smithytime.ParseHTTPDate(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.AbortDate = ptr.Time(t)
+ }
+
+ if headerValues := response.Header.Values("x-amz-abort-rule-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.AbortRuleId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *ListPartsOutput
+ if *v == nil {
+ sv = &ListPartsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumAlgorithm", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv)
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ case strings.EqualFold("Initiator", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxParts", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxParts = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("NextPartNumberMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextPartNumberMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PartNumberMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.PartNumberMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Part", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPartsUnwrapped(&sv.Parts, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ case strings.EqualFold("UploadId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
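+// The PutBucket* operations below return no XML payload on success, so their
+// deserializers simply drain the response body, which lets the underlying
+// HTTP connection be reused.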
+type awsRestxml_deserializeOpPutBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response, &metadata)
+ }
+ output := &PutBucketAccelerateConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketAccelerateConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketAcl struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketAcl(response, &metadata)
+ }
+ output := &PutBucketAclOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response, &metadata)
+ }
+ output := &PutBucketAnalyticsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketAnalyticsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketCors struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketCors) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketCors(response, &metadata)
+ }
+ output := &PutBucketCorsOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketCors(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketEncryption struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketEncryption) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketEncryption(response, &metadata)
+ }
+ output := &PutBucketEncryptionOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketEncryption(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response, &metadata)
+ }
+ output := &PutBucketIntelligentTieringConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketIntelligentTieringConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketInventoryConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response, &metadata)
+ }
+ output := &PutBucketInventoryConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketInventoryConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketLifecycleConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response, &metadata)
+ }
+ output := &PutBucketLifecycleConfigurationOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
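+// awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput
+// binds the x-amz-transition-default-minimum-object-size response header to
+// the output's TransitionDefaultMinimumObjectSize enum field; this is why
+// PutBucketLifecycleConfiguration, unlike most PutBucket* operations above,
+// decodes its response headers instead of simply discarding the body.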
+func awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(v *PutBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutBucketLogging struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketLogging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketLogging(response, &metadata)
+ }
+ output := &PutBucketLoggingOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketLogging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketMetricsConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response, &metadata)
+ }
+ output := &PutBucketMetricsConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketNotificationConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response, &metadata)
+ }
+ output := &PutBucketNotificationConfigurationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketOwnershipControls) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response, &metadata)
+ }
+ output := &PutBucketOwnershipControlsOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketOwnershipControls(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketPolicy struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketPolicy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketPolicy(response, &metadata)
+ }
+ output := &PutBucketPolicyOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketReplication struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketReplication) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketReplication(response, &metadata)
+ }
+ output := &PutBucketReplicationOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketReplication(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketRequestPayment struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketRequestPayment) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketRequestPayment(response, &metadata)
+ }
+ output := &PutBucketRequestPaymentOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketRequestPayment(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketTagging struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketTagging(response, &metadata)
+ }
+ output := &PutBucketTaggingOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketVersioning struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketVersioning) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketVersioning(response, &metadata)
+ }
+ output := &PutBucketVersioningOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketVersioning(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpPutBucketWebsite struct {
+}
+
+func (*awsRestxml_deserializeOpPutBucketWebsite) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutBucketWebsite(response, &metadata)
+ }
+ output := &PutBucketWebsiteOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutBucketWebsite(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
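+// PutObject departs from the empty-output template in two ways: its error
+// deserializer dispatches on modeled error codes (EncryptionTypeMismatch,
+// InvalidRequest, InvalidWriteOffset, TooManyParts) before falling back to a
+// generic API error, and its success path decodes a large set of response
+// headers (checksums, server-side-encryption settings, ETag, version id)
+// into PutObjectOutput via the HTTP-bindings helper below.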
+type awsRestxml_deserializeOpPutObject struct {
+}
+
+func (*awsRestxml_deserializeOpPutObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutObject(response, &metadata)
+ }
+ output := &PutObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("EncryptionTypeMismatch", errorCode):
+ return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody)
+
+ case strings.EqualFold("InvalidRequest", errorCode):
+ return awsRestxml_deserializeErrorInvalidRequest(response, errorBody)
+
+ case strings.EqualFold("InvalidWriteOffset", errorCode):
+ return awsRestxml_deserializeErrorInvalidWriteOffset(response, errorBody)
+
+ case strings.EqualFold("TooManyParts", errorCode):
+ return awsRestxml_deserializeErrorTooManyParts(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
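+// Each header binding trims the first header value and converts it to the
+// field's Go type: strconv.ParseBool for
+// x-amz-server-side-encryption-bucket-key-enabled, strconv.ParseInt (base 0)
+// for x-amz-object-size, and plain string or enum assignment for the rest.
+// Any parse failure aborts deserialization of the whole output.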
+func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32C = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC64NVME = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA1 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA256 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumType = types.ChecksumType(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.Expiration = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-object-size"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseInt(headerValues[0], 0, 64)
+ if err != nil {
+ return err
+ }
+ v.Size = ptr.Int64(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSEncryptionContext = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectAcl struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectAcl) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutObjectAcl(response, &metadata)
+ }
+ output := &PutObjectAclOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
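+// PutObjectAcl models a single error, NoSuchKey: after the generic
+// code/message extraction, the switch rewinds the buffered body and delegates
+// to awsRestxml_deserializeErrorNoSuchKey when that code matches.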
+func awsRestxml_deserializeOpErrorPutObjectAcl(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("NoSuchKey", errorCode):
+ return awsRestxml_deserializeErrorNoSuchKey(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectAclOutput(v *PutObjectAclOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
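+// PutObjectLegalHold, PutObjectLockConfiguration, and PutObjectRetention all
+// share a single response binding: the x-amz-request-charged header,
+// surfaced as types.RequestCharged on their respective outputs.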
+type awsRestxml_deserializeOpPutObjectLegalHold struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectLegalHold) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutObjectLegalHold(response, &metadata)
+ }
+ output := &PutObjectLegalHoldOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectLegalHold(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectLegalHoldOutput(v *PutObjectLegalHoldOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectLockConfiguration) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response, &metadata)
+ }
+ output := &PutObjectLockConfigurationOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectLockConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectLockConfigurationOutput(v *PutObjectLockConfigurationOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectRetention struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectRetention) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutObjectRetention(response, &metadata)
+ }
+ output := &PutObjectRetentionOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectRetention(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectRetentionOutput(v *PutObjectRetentionOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutObjectTagging struct {
+}
+
+func (*awsRestxml_deserializeOpPutObjectTagging) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutObjectTagging(response, &metadata)
+ }
+ output := &PutObjectTaggingOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutObjectTagging(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsPutObjectTaggingOutput(v *PutObjectTaggingOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.VersionId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpPutPublicAccessBlock struct {
+}
+
+func (*awsRestxml_deserializeOpPutPublicAccessBlock) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorPutPublicAccessBlock(response, &metadata)
+ }
+ output := &PutPublicAccessBlockOutput{}
+ out.Result = output
+
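+ // PutPublicAccessBlock has no modeled response payload; drain the body so
+ // the underlying HTTP connection can be reused.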
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorPutPublicAccessBlock(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpRestoreObject struct {
+}
+
+func (*awsRestxml_deserializeOpRestoreObject) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorRestoreObject(response, &metadata)
+ }
+ output := &RestoreObjectOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
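+// RestoreObject is one of the few operations in this section with a modeled
+// error: an ObjectAlreadyInActiveTierError code is mapped to its typed error
+// below, while anything else falls through to the generic case.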
+func awsRestxml_deserializeOpErrorRestoreObject(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ObjectAlreadyInActiveTierError", errorCode):
+ return awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsRestoreObjectOutput(v *RestoreObjectOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-restore-output-path"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RestoreOutputPath = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpSelectObjectContent struct {
+}
+
+func (*awsRestxml_deserializeOpSelectObjectContent) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorSelectObjectContent(response, &metadata)
+ }
+ output := &SelectObjectContentOutput{}
+ out.Result = output
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorSelectObjectContent(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsRestxml_deserializeOpUploadPart struct {
+}
+
+func (*awsRestxml_deserializeOpUploadPart) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorUploadPart(response, &metadata)
+ }
+ output := &UploadPartOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsUploadPartOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorUploadPart(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc32c"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC32C = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumCRC64NVME = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA1 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-checksum-sha256"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ChecksumSHA256 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ETag = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+
+type awsRestxml_deserializeOpUploadPartCopy struct {
+}
+
+func (*awsRestxml_deserializeOpUploadPartCopy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorUploadPartCopy(response, &metadata)
+ }
+ output := &UploadPartCopyOutput{}
+ out.Result = output
+
+ err = awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(output, response)
+ if err != nil {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)}
+ }
+
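+ // UploadPartCopy returns an XML CopyPartResult in the body. The body is
+ // teed through a fixed-size ring buffer so that, on a decode failure, the
+ // most recently read bytes (up to 1 KiB) can be attached to the
+ // DeserializationError as a snapshot. io.EOF from FetchRootElement means an
+ // empty body and is tolerated.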
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentCopyPartResult(&output.CopyPartResult, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorUploadPartCopy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyOutput, response *smithyhttp.Response) error {
+ if v == nil {
+ return fmt.Errorf("unsupported deserialization for nil %T", v)
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ vv, err := strconv.ParseBool(headerValues[0])
+ if err != nil {
+ return err
+ }
+ v.BucketKeyEnabled = ptr.Bool(vv)
+ }
+
+ if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.CopySourceVersionId = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.RequestCharged = types.RequestCharged(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerAlgorithm = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-key-MD5"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSECustomerKeyMD5 = ptr.String(headerValues[0])
+ }
+
+ if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 {
+ headerValues[0] = strings.TrimSpace(headerValues[0])
+ v.SSEKMSKeyId = ptr.String(headerValues[0])
+ }
+
+ return nil
+}
+func awsRestxml_deserializeOpDocumentUploadPartCopyOutput(v **UploadPartCopyOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *UploadPartCopyOutput
+ if *v == nil {
+ sv = &UploadPartCopyOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("CopyPartResult", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCopyPartResult(&sv.CopyPartResult, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestxml_deserializeOpWriteGetObjectResponse struct {
+}
+
+func (*awsRestxml_deserializeOpWriteGetObjectResponse) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestxml_deserializeOpErrorWriteGetObjectResponse(response, &metadata)
+ }
+ output := &WriteGetObjectResponseOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestxml_deserializeOpErrorWriteGetObjectResponse(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{
+ UseStatusCode: true, StatusCode: response.StatusCode,
+ })
+ if err != nil {
+ return err
+ }
+ if hostID := errorComponents.HostID; len(hostID) != 0 {
+ s3shared.SetHostIDMetadata(metadata, hostID)
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
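+// awsRestxml_deserializeEventStreamSelectObjectContentEventStream dispatches a
+// decoded eventstream message to the matching SelectObjectContentEventStream
+// union member based on the :event-type header. Event types the client does
+// not recognize are preserved as an UnknownUnionMember carrying the
+// re-encoded raw message rather than being dropped.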
+func awsRestxml_deserializeEventStreamSelectObjectContentEventStream(v *types.SelectObjectContentEventStream, msg *eventstream.Message) error {
+ if v == nil {
+ return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ eventType := msg.Headers.Get(eventstreamapi.EventTypeHeader)
+ if eventType == nil {
+ return fmt.Errorf("%s event header not present", eventstreamapi.EventTypeHeader)
+ }
+
+ switch {
+ case strings.EqualFold("Cont", eventType.String()):
+ vv := &types.SelectObjectContentEventStreamMemberCont{}
+ if err := awsRestxml_deserializeEventMessageContinuationEvent(&vv.Value, msg); err != nil {
+ return err
+ }
+ *v = vv
+ return nil
+
+ case strings.EqualFold("End", eventType.String()):
+ vv := &types.SelectObjectContentEventStreamMemberEnd{}
+ if err := awsRestxml_deserializeEventMessageEndEvent(&vv.Value, msg); err != nil {
+ return err
+ }
+ *v = vv
+ return nil
+
+ case strings.EqualFold("Progress", eventType.String()):
+ vv := &types.SelectObjectContentEventStreamMemberProgress{}
+ if err := awsRestxml_deserializeEventMessageProgressEvent(&vv.Value, msg); err != nil {
+ return err
+ }
+ *v = vv
+ return nil
+
+ case strings.EqualFold("Records", eventType.String()):
+ vv := &types.SelectObjectContentEventStreamMemberRecords{}
+ if err := awsRestxml_deserializeEventMessageRecordsEvent(&vv.Value, msg); err != nil {
+ return err
+ }
+ *v = vv
+ return nil
+
+ case strings.EqualFold("Stats", eventType.String()):
+ vv := &types.SelectObjectContentEventStreamMemberStats{}
+ if err := awsRestxml_deserializeEventMessageStatsEvent(&vv.Value, msg); err != nil {
+ return err
+ }
+ *v = vv
+ return nil
+
+ default:
+ buffer := bytes.NewBuffer(nil)
+ eventstream.NewEncoder().Encode(buffer, *msg)
+ *v = &types.UnknownUnionMember{
+ Tag: eventType.String(),
+ Value: buffer.Bytes(),
+ }
+ return nil
+
+ }
+}
+
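+// awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream
+// turns an exception message into an error, preferring the :exception-type
+// header as the error code and the XML payload for the message. Note that the
+// ring buffer, tee reader, and JSON decoder set up below are never consumed:
+// awsxml.GetErrorResponseComponents reads the payload reader directly, so
+// they appear to be vestigial output of the code generator.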
+func awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg *eventstream.Message) error {
+ exceptionType := msg.Headers.Get(eventstreamapi.ExceptionTypeHeader)
+ if exceptionType == nil {
+ return fmt.Errorf("%s event header not present", eventstreamapi.ExceptionTypeHeader)
+ }
+
+ switch {
+ default:
+ br := bytes.NewReader(msg.Payload)
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(br, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ errorComponents, err := awsxml.GetErrorResponseComponents(br, true)
+ if err != nil {
+ return err
+ }
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+ if ev := exceptionType.String(); len(ev) > 0 {
+ errorCode = ev
+ } else if ev := errorComponents.Code; len(ev) > 0 {
+ errorCode = ev
+ }
+ if ev := errorComponents.Message; len(ev) > 0 {
+ errorMessage = ev
+ }
+ return &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+
+ }
+}
+
+func awsRestxml_deserializeEventMessageRecordsEvent(v *types.RecordsEvent, msg *eventstream.Message) error {
+ if v == nil {
+ return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ if msg.Payload != nil {
+ bsv := make([]byte, len(msg.Payload))
+ copy(bsv, msg.Payload)
+
+ v.Payload = bsv
+ }
+ return nil
+}
+
+func awsRestxml_deserializeEventMessageStatsEvent(v *types.StatsEvent, msg *eventstream.Message) error {
+ if v == nil {
+ return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ br := bytes.NewReader(msg.Payload)
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(br, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentStats(&v.Details, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return nil
+}
+
+func awsRestxml_deserializeEventMessageProgressEvent(v *types.ProgressEvent, msg *eventstream.Message) error {
+ if v == nil {
+ return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ br := bytes.NewReader(msg.Payload)
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(br, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentProgress(&v.Details, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return nil
+}
+
+func awsRestxml_deserializeEventMessageContinuationEvent(v *types.ContinuationEvent, msg *eventstream.Message) error {
+ if v == nil {
+ return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ br := bytes.NewReader(msg.Payload)
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(br, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentContinuationEvent(&v, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return nil
+}
+
+func awsRestxml_deserializeEventMessageEndEvent(v *types.EndEvent, msg *eventstream.Message) error {
+ if v == nil {
+ return fmt.Errorf("unexpected serialization of nil %T", v)
+ }
+
+ br := bytes.NewReader(msg.Payload)
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(br, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentEndEvent(&v, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return nil
+}
+
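+// ContinuationEvent and EndEvent carry no modeled fields, so the document
+// deserializers below simply walk and skip the element's tokens until the
+// node is exhausted.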
+func awsRestxml_deserializeDocumentContinuationEvent(v **types.ContinuationEvent, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ContinuationEvent
+ if *v == nil {
+ sv = &types.ContinuationEvent{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEndEvent(v **types.EndEvent, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.EndEvent
+ if *v == nil {
+ sv = &types.EndEvent{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Progress
+ if *v == nil {
+ sv = &types.Progress{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("BytesProcessed", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.BytesProcessed = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("BytesReturned", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.BytesReturned = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("BytesScanned", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.BytesScanned = ptr.Int64(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Stats
+ if *v == nil {
+ sv = &types.Stats{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("BytesProcessed", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.BytesProcessed = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("BytesReturned", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.BytesReturned = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("BytesScanned", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.BytesScanned = ptr.Int64(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
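+// The modeled S3 errors below add no deserializable fields; the error code
+// has already been matched by the calling awsRestxml_deserializeOpError*
+// helper, so these functions ignore the response and body and return the
+// bare typed error value.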
+func awsRestxml_deserializeErrorBucketAlreadyExists(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.BucketAlreadyExists{}
+ return output
+}
+
+func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.BucketAlreadyOwnedByYou{}
+ return output
+}
+
+func awsRestxml_deserializeErrorEncryptionTypeMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.EncryptionTypeMismatch{}
+ return output
+}
+
+func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidObjectState{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ err = awsRestxml_deserializeDocumentInvalidObjectState(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsRestxml_deserializeErrorInvalidRequest(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRequest{}
+ return output
+}
+
+func awsRestxml_deserializeErrorInvalidWriteOffset(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidWriteOffset{}
+ return output
+}
+
+func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.NoSuchBucket{}
+ return output
+}
+
+func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.NoSuchKey{}
+ return output
+}
+
+func awsRestxml_deserializeErrorNoSuchUpload(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.NoSuchUpload{}
+ return output
+}
+
+func awsRestxml_deserializeErrorNotFound(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.NotFound{}
+ return output
+}
+
+func awsRestxml_deserializeErrorObjectAlreadyInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ObjectAlreadyInActiveTierError{}
+ return output
+}
+
+func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ObjectNotInActiveTierError{}
+ return output
+}
+
+func awsRestxml_deserializeErrorTooManyParts(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.TooManyParts{}
+ return output
+}
+
+func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AbortIncompleteMultipartUpload
+ if *v == nil {
+ sv = &types.AbortIncompleteMultipartUpload{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DaysAfterInitiation", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.DaysAfterInitiation = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAccessControlTranslation(v **types.AccessControlTranslation, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AccessControlTranslation
+ if *v == nil {
+ sv = &types.AccessControlTranslation{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Owner", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Owner = types.OwnerOverride(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedHeaders(v *[]string, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = xtv
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
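+// The *Unwrapped variants below handle the flattened form of the list, where
+// each element appears directly under the parent instead of inside a wrapper
+// node: the caller invokes the function once per repeated element, and each
+// call appends a single decoded value.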
+func awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv string
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentAllowedMethods(v *[]string, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = xtv
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv string
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentAllowedOrigins(v *[]string, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = xtv
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv string
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentAnalyticsAndOperator(v **types.AnalyticsAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsAndOperator
+ if *v == nil {
+ sv = &types.AnalyticsAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfiguration(v **types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsConfiguration
+ if *v == nil {
+ sv = &types.AnalyticsConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("StorageClassAnalysis", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentStorageClassAnalysis(&sv.StorageClassAnalysis, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfigurationList(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.AnalyticsConfiguration
+ if *v == nil {
+ sv = make([]types.AnalyticsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.AnalyticsConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsConfigurationListUnwrapped(v *[]types.AnalyticsConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.AnalyticsConfiguration
+ if *v == nil {
+ sv = make([]types.AnalyticsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.AnalyticsConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentAnalyticsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentAnalyticsExportDestination(v **types.AnalyticsExportDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsExportDestination
+ if *v == nil {
+ sv = &types.AnalyticsExportDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3BucketDestination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
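+// AnalyticsFilter is a tagged union: exactly one of And, Prefix, or Tag is
+// expected. memberFound records that a member has been decoded so additional
+// sibling elements are skipped, and unrecognized tags are captured as an
+// UnknownUnionMember instead of failing the decode.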
+func awsRestxml_deserializeDocumentAnalyticsFilter(v *types.AnalyticsFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var uv types.AnalyticsFilter
+ var memberFound bool
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ if memberFound {
+ if err = decoder.Decoder.Skip(); err != nil {
+ return err
+ }
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ var mv types.AnalyticsAndOperator
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentAnalyticsAndOperator(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.AnalyticsFilterMemberAnd{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.AnalyticsFilterMemberPrefix{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ var mv types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.AnalyticsFilterMemberTag{Value: mv}
+ memberFound = true
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+ memberFound = true
+
+ }
+ decoder = originalDecoder
+ }
+ *v = uv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentAnalyticsS3BucketDestination(v **types.AnalyticsS3BucketDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AnalyticsS3BucketDestination
+ if *v == nil {
+ sv = &types.AnalyticsS3BucketDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("BucketAccountId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.BucketAccountId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Format", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Format = types.AnalyticsS3ExportFileFormat(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
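+// awsRestxml_deserializeDocumentBucket decodes a single Bucket element.
+// String members are copied via ptr.String, while CreationDate is parsed
+// from its timestamp text with smithytime.ParseDateTime.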
+func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Bucket
+ if *v == nil {
+ sv = &types.Bucket{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("BucketRegion", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.BucketRegion = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("CreationDate", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.CreationDate = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
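+// awsRestxml_deserializeDocumentBucketAlreadyExists and the
+// BucketAlreadyOwnedByYou deserializer below decode error shapes that carry
+// no members of their own; their token loops exist only to drain and skip
+// the element's children.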
+func awsRestxml_deserializeDocumentBucketAlreadyExists(v **types.BucketAlreadyExists, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.BucketAlreadyExists
+ if *v == nil {
+ sv = &types.BucketAlreadyExists{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentBucketAlreadyOwnedByYou(v **types.BucketAlreadyOwnedByYou, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.BucketAlreadyOwnedByYou
+ if *v == nil {
+ sv = &types.BucketAlreadyOwnedByYou{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
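+// awsRestxml_deserializeDocumentBuckets decodes the wrapped form of the
+// bucket list: each child <Bucket> element is deserialized into a
+// types.Bucket and appended; anything else is skipped.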
+func awsRestxml_deserializeDocumentBuckets(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Bucket
+ if *v == nil {
+ sv = make([]types.Bucket, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Bucket", t.Name.Local):
+ var col types.Bucket
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
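+// awsRestxml_deserializeDocumentBucketsUnwrapped handles the flattened form
+// of the same list. The parent decoder invokes it once per repeated element,
+// so it decodes exactly one types.Bucket from decoder.StartEl and appends it
+// to the accumulated slice.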
+func awsRestxml_deserializeDocumentBucketsUnwrapped(v *[]types.Bucket, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Bucket
+ if *v == nil {
+ sv = make([]types.Bucket, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Bucket
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentBucket(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Checksum
+ if *v == nil {
+ sv = &types.Checksum{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32C = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC64NVME = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA1 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA256 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
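+// awsRestxml_deserializeDocumentChecksumAlgorithmList decodes a wrapped list
+// of enum values: the text of each <member> element is converted directly to
+// a types.ChecksumAlgorithm.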
+func awsRestxml_deserializeDocumentChecksumAlgorithmList(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ChecksumAlgorithm
+ if *v == nil {
+ sv = make([]types.ChecksumAlgorithm, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ChecksumAlgorithm
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = types.ChecksumAlgorithm(xtv)
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(v *[]types.ChecksumAlgorithm, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ChecksumAlgorithm
+ if *v == nil {
+ sv = make([]types.ChecksumAlgorithm, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ChecksumAlgorithm
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = types.ChecksumAlgorithm(xtv)
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentCommonPrefix(v **types.CommonPrefix, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CommonPrefix
+ if *v == nil {
+ sv = &types.CommonPrefix{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCommonPrefixList(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.CommonPrefix
+ if *v == nil {
+ sv = make([]types.CommonPrefix, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.CommonPrefix
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(v *[]types.CommonPrefix, decoder smithyxml.NodeDecoder) error {
+ var sv []types.CommonPrefix
+ if *v == nil {
+ sv = make([]types.CommonPrefix, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.CommonPrefix
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentCommonPrefix(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentCondition(v **types.Condition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Condition
+ if *v == nil {
+ sv = &types.Condition{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("HttpErrorCodeReturnedEquals", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HttpErrorCodeReturnedEquals = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("KeyPrefixEquals", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyPrefixEquals = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CopyObjectResult
+ if *v == nil {
+ sv = &types.CopyObjectResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32C = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC64NVME = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA1 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA256 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CopyPartResult
+ if *v == nil {
+ sv = &types.CopyPartResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32C = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC64NVME = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA1 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA256 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
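+// awsRestxml_deserializeDocumentCORSRule decodes a CORSRule. Its list-valued
+// members (AllowedHeader, AllowedMethod, AllowedOrigin, ExposeHeader) appear
+// as repeated flattened elements, so each occurrence is routed through the
+// corresponding *Unwrapped helper, which appends one entry per call.
+// MaxAgeSeconds is parsed as a base-10 integer and narrowed to int32.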
+func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.CORSRule
+ if *v == nil {
+ sv = &types.CORSRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AllowedHeader", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAllowedHeadersUnwrapped(&sv.AllowedHeaders, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("AllowedMethod", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAllowedMethodsUnwrapped(&sv.AllowedMethods, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("AllowedOrigin", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAllowedOriginsUnwrapped(&sv.AllowedOrigins, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ExposeHeader", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentExposeHeadersUnwrapped(&sv.ExposeHeaders, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("MaxAgeSeconds", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxAgeSeconds = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCORSRules(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.CORSRule
+ if *v == nil {
+ sv = make([]types.CORSRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.CORSRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentCORSRulesUnwrapped(v *[]types.CORSRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.CORSRule
+ if *v == nil {
+ sv = make([]types.CORSRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.CORSRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentCORSRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
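+
+// awsRestxml_deserializeDocumentDefaultRetention decodes Object Lock default
+// retention settings: Days and Years are parsed with strconv.ParseInt and
+// narrowed to int32, and Mode is converted to types.ObjectLockRetentionMode.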
+func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DefaultRetention
+ if *v == nil {
+ sv = &types.DefaultRetention{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Mode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Mode = types.ObjectLockRetentionMode(xtv)
+ }
+
+ case strings.EqualFold("Years", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Years = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
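+// awsRestxml_deserializeDocumentDeletedObject decodes a DeletedObject entry.
+// DeleteMarker is parsed with strconv.ParseBool; a value that is not a valid
+// boolean literal fails the whole deserialization.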
+func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DeletedObject
+ if *v == nil {
+ sv = &types.DeletedObject{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DeleteMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val)
+ }
+ sv.DeleteMarker = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DeleteMarkerVersionId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeletedObjects(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.DeletedObject
+ if *v == nil {
+ sv = make([]types.DeletedObject, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.DeletedObject
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeletedObjectsUnwrapped(v *[]types.DeletedObject, decoder smithyxml.NodeDecoder) error {
+ var sv []types.DeletedObject
+ if *v == nil {
+ sv = make([]types.DeletedObject, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.DeletedObject
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentDeletedObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DeleteMarkerEntry
+ if *v == nil {
+ sv = &types.DeleteMarkerEntry{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IsLatest", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val)
+ }
+ sv.IsLatest = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeleteMarkerReplication(v **types.DeleteMarkerReplication, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.DeleteMarkerReplication
+ if *v == nil {
+ sv = &types.DeleteMarkerReplication{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.DeleteMarkerReplicationStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeleteMarkers(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.DeleteMarkerEntry
+ if *v == nil {
+ sv = make([]types.DeleteMarkerEntry, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.DeleteMarkerEntry
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentDeleteMarkersUnwrapped(v *[]types.DeleteMarkerEntry, decoder smithyxml.NodeDecoder) error {
+ var sv []types.DeleteMarkerEntry
+ if *v == nil {
+ sv = make([]types.DeleteMarkerEntry, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.DeleteMarkerEntry
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentDeleteMarkerEntry(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentDestination(v **types.Destination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Destination
+ if *v == nil {
+ sv = &types.Destination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessControlTranslation", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAccessControlTranslation(&sv.AccessControlTranslation, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Account", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Account = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EncryptionConfiguration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEncryptionConfiguration(&sv.EncryptionConfiguration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Metrics", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetrics(&sv.Metrics, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ReplicationTime", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationTime(&sv.ReplicationTime, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.EncryptionConfiguration
+ if *v == nil {
+ sv = &types.EncryptionConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ReplicaKmsKeyID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ReplicaKmsKeyID = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEncryptionTypeMismatch(v **types.EncryptionTypeMismatch, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.EncryptionTypeMismatch
+ if *v == nil {
+ sv = &types.EncryptionTypeMismatch{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Error
+ if *v == nil {
+ sv = &types.Error{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Code", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Code = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrorDetails(v **types.ErrorDetails, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ErrorDetails
+ if *v == nil {
+ sv = &types.ErrorDetails{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ErrorCode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ErrorCode = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ErrorMessage", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ErrorMessage = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ErrorDocument
+ if *v == nil {
+ sv = &types.ErrorDocument{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrors(v *[]types.Error, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Error
+ if *v == nil {
+ sv = make([]types.Error, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Error
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentErrorsUnwrapped(v *[]types.Error, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Error
+ if *v == nil {
+ sv = make([]types.Error, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Error
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentError(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentEventBridgeConfiguration(v **types.EventBridgeConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.EventBridgeConfiguration
+ if *v == nil {
+ sv = &types.EventBridgeConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEventList(v *[]types.Event, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Event
+ if *v == nil {
+ sv = make([]types.Event, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Event
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = types.Event(xtv)
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentEventListUnwrapped(v *[]types.Event, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Event
+ if *v == nil {
+ sv = make([]types.Event, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Event
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = types.Event(xtv)
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentExistingObjectReplication(v **types.ExistingObjectReplication, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ExistingObjectReplication
+ if *v == nil {
+ sv = &types.ExistingObjectReplication{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ExistingObjectReplicationStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentExposeHeaders(v *[]string, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = xtv
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentExposeHeadersUnwrapped(v *[]string, decoder smithyxml.NodeDecoder) error {
+ var sv []string
+ if *v == nil {
+ sv = make([]string, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv string
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentFilterRule(v **types.FilterRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.FilterRule
+ if *v == nil {
+ sv = &types.FilterRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Name", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Name = types.FilterRuleName(xtv)
+ }
+
+ case strings.EqualFold("Value", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Value = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentFilterRuleList(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.FilterRule
+ if *v == nil {
+ sv = make([]types.FilterRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.FilterRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.FilterRule
+ if *v == nil {
+ sv = make([]types.FilterRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.FilterRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentFilterRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(v **types.GetBucketMetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.GetBucketMetadataTableConfigurationResult
+ if *v == nil {
+ sv = &types.GetBucketMetadataTableConfigurationResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Error", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("MetadataTableConfigurationResult", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetadataTableConfigurationResult(&sv.MetadataTableConfigurationResult, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
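+// awsRestxml_deserializeDocumentGetObjectAttributesParts decodes the Parts
+// section of a GetObjectAttributes response. Note the name mapping: the
+// <PartsCount> element populates sv.TotalPartsCount, while the repeated
+// <Part> elements are accumulated through the flattened PartsList helper.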
+func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.GetObjectAttributesParts
+ if *v == nil {
+ sv = &types.GetObjectAttributesParts{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IsTruncated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val)
+ }
+ sv.IsTruncated = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("MaxParts", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.MaxParts = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("NextPartNumberMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NextPartNumberMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("PartNumberMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.PartNumberMarker = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Part", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPartsListUnwrapped(&sv.Parts, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PartsCount", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.TotalPartsCount = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentGrant(v **types.Grant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Grant
+ if *v == nil {
+ sv = &types.Grant{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Grantee", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Permission", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Permission = types.Permission(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
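+// awsRestxml_deserializeDocumentGrantee decodes a Grantee. Unlike most
+// shapes in this file it also inspects the start element's XML attributes:
+// the xsi:type attribute (matched against its namespace-prefixed name)
+// selects the grantee type before the child elements are decoded.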
+func awsRestxml_deserializeDocumentGrantee(v **types.Grantee, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Grantee
+ if *v == nil {
+ sv = &types.Grantee{}
+ } else {
+ sv = *v
+ }
+
+ for _, attr := range decoder.StartEl.Attr {
+ name := attr.Name.Local
+ if len(attr.Name.Space) != 0 {
+ name = attr.Name.Space + `:` + attr.Name.Local
+ }
+ switch {
+ case strings.EqualFold("xsi:type", name):
+ val := []byte(attr.Value)
+ {
+ xtv := string(val)
+ sv.Type = types.Type(xtv)
+ }
+
+ }
+ }
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DisplayName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DisplayName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("EmailAddress", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.EmailAddress = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("URI", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.URI = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentGrants(v *[]types.Grant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Grant
+ if *v == nil {
+ sv = make([]types.Grant, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Grant", t.Name.Local):
+ var col types.Grant
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentGrantsUnwrapped(v *[]types.Grant, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Grant
+ if *v == nil {
+ sv = make([]types.Grant, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Grant
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentIndexDocument(v **types.IndexDocument, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IndexDocument
+ if *v == nil {
+ sv = &types.IndexDocument{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Suffix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Suffix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInitiator(v **types.Initiator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Initiator
+ if *v == nil {
+ sv = &types.Initiator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DisplayName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DisplayName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringAndOperator(v **types.IntelligentTieringAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IntelligentTieringAndOperator
+ if *v == nil {
+ sv = &types.IntelligentTieringAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringConfiguration(v **types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IntelligentTieringConfiguration
+ if *v == nil {
+ sv = &types.IntelligentTieringConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIntelligentTieringFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.IntelligentTieringStatus(xtv)
+ }
+
+ case strings.EqualFold("Tiering", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTieringListUnwrapped(&sv.Tierings, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
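+// Wrapped list form: every entry arrives inside a <member> element, so the loop
+// only recurses into the struct deserializer for tags named "member" and skips
+// anything else.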
+func awsRestxml_deserializeDocumentIntelligentTieringConfigurationList(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.IntelligentTieringConfiguration
+ if *v == nil {
+ sv = make([]types.IntelligentTieringConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.IntelligentTieringConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
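+// Unwrapped (flattened) list form: the caller has already positioned the decoder
+// on a single entry element, so each call decodes exactly one value and appends
+// it; repeated elements accumulate across successive calls from the parent loop.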
+func awsRestxml_deserializeDocumentIntelligentTieringConfigurationListUnwrapped(v *[]types.IntelligentTieringConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.IntelligentTieringConfiguration
+ if *v == nil {
+ sv = make([]types.IntelligentTieringConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.IntelligentTieringConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentIntelligentTieringConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentIntelligentTieringFilter(v **types.IntelligentTieringFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IntelligentTieringFilter
+ if *v == nil {
+ sv = &types.IntelligentTieringFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentIntelligentTieringAndOperator(&sv.And, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectState, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidObjectState
+ if *v == nil {
+ sv = &types.InvalidObjectState{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessTier", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessTier = types.IntelligentTieringAccessTier(xtv)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
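+// InvalidRequest models an error shape with no members of its own, so the loop
+// only drains the element; the human-readable message is expected to be filled
+// in by the shared REST-XML error-envelope handling rather than here.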
+func awsRestxml_deserializeDocumentInvalidRequest(v **types.InvalidRequest, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidRequest
+ if *v == nil {
+ sv = &types.InvalidRequest{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInvalidWriteOffset(v **types.InvalidWriteOffset, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidWriteOffset
+ if *v == nil {
+ sv = &types.InvalidWriteOffset{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
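+// InventoryConfiguration mixes scalar fields with nested documents: Destination,
+// Filter, OptionalFields, and Schedule each delegate to their own deserializer,
+// while IsEnabled is parsed from the element text with strconv.ParseBool.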
+func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryConfiguration
+ if *v == nil {
+ sv = &types.InventoryConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Destination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryDestination(&sv.Destination, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IncludedObjectVersions", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.IncludedObjectVersions = types.InventoryIncludedObjectVersions(xtv)
+ }
+
+ case strings.EqualFold("IsEnabled", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val)
+ }
+ sv.IsEnabled = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("OptionalFields", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryOptionalFields(&sv.OptionalFields, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Schedule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventorySchedule(&sv.Schedule, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryConfigurationList(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.InventoryConfiguration
+ if *v == nil {
+ sv = make([]types.InventoryConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.InventoryConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryConfigurationListUnwrapped(v *[]types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.InventoryConfiguration
+ if *v == nil {
+ sv = make([]types.InventoryConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.InventoryConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentInventoryConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryDestination(v **types.InventoryDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryDestination
+ if *v == nil {
+ sv = &types.InventoryDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3BucketDestination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryS3BucketDestination(&sv.S3BucketDestination, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryEncryption(v **types.InventoryEncryption, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryEncryption
+ if *v == nil {
+ sv = &types.InventoryEncryption{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("SSE-KMS", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSSEKMS(&sv.SSEKMS, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SSE-S3", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSSES3(&sv.SSES3, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryFilter(v **types.InventoryFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryFilter
+ if *v == nil {
+ sv = &types.InventoryFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
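+// Enum-valued list: each <Field> text node converts directly to the
+// types.InventoryOptionalField string enum, with no nested structure to decode.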
+func awsRestxml_deserializeDocumentInventoryOptionalFields(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.InventoryOptionalField
+ if *v == nil {
+ sv = make([]types.InventoryOptionalField, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ memberDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ decoder = memberDecoder
+ switch {
+ case strings.EqualFold("Field", t.Name.Local):
+ var col types.InventoryOptionalField
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ col = types.InventoryOptionalField(xtv)
+ }
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryOptionalFieldsUnwrapped(v *[]types.InventoryOptionalField, decoder smithyxml.NodeDecoder) error {
+ var sv []types.InventoryOptionalField
+ if *v == nil {
+ sv = make([]types.InventoryOptionalField, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.InventoryOptionalField
+ t := decoder.StartEl
+ _ = t
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = types.InventoryOptionalField(xtv)
+ }
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventoryS3BucketDestination(v **types.InventoryS3BucketDestination, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventoryS3BucketDestination
+ if *v == nil {
+ sv = &types.InventoryS3BucketDestination{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccountId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccountId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Bucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Bucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Encryption", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInventoryEncryption(&sv.Encryption, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Format", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Format = types.InventoryFormat(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentInventorySchedule(v **types.InventorySchedule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InventorySchedule
+ if *v == nil {
+ sv = &types.InventorySchedule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Frequency", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Frequency = types.InventoryFrequency(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
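+// Note the legacy XML names here: the function ARN is carried in a
+// <CloudFunction> element and mapped onto LambdaFunctionArn, and events arrive
+// as repeated flattened <Event> elements handled by the EventList unwrapped
+// helper.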
+func awsRestxml_deserializeDocumentLambdaFunctionConfiguration(v **types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LambdaFunctionConfiguration
+ if *v == nil {
+ sv = &types.LambdaFunctionConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Event", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("CloudFunction", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.LambdaFunctionArn = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLambdaFunctionConfigurationList(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.LambdaFunctionConfiguration
+ if *v == nil {
+ sv = make([]types.LambdaFunctionConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.LambdaFunctionConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(v *[]types.LambdaFunctionConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.LambdaFunctionConfiguration
+ if *v == nil {
+ sv = make([]types.LambdaFunctionConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.LambdaFunctionConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentLambdaFunctionConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
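+// LifecycleExpiration shows the three scalar conversions used throughout this
+// file: timestamps via smithytime.ParseDateTime, integers via strconv.ParseInt
+// (narrowed to int32 where the model requires it), and booleans via
+// strconv.ParseBool.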
+func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpiration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleExpiration
+ if *v == nil {
+ sv = &types.LifecycleExpiration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Date", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Date = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val)
+ }
+ sv.ExpiredObjectDeleteMarker = ptr.Bool(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
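+// LifecycleRule is a composite: Transition and NoncurrentVersionTransition are
+// flattened in the XML, so each occurrence re-enters the corresponding
+// *Unwrapped list helper and appends to the slice built up across iterations of
+// this loop.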
+func awsRestxml_deserializeDocumentLifecycleRule(v **types.LifecycleRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleRule
+ if *v == nil {
+ sv = &types.LifecycleRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AbortIncompleteMultipartUpload", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(&sv.AbortIncompleteMultipartUpload, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Expiration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLifecycleExpiration(&sv.Expiration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLifecycleRuleFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NoncurrentVersionExpiration", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionExpiration(&sv.NoncurrentVersionExpiration, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("NoncurrentVersionTransition", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(&sv.NoncurrentVersionTransitions, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ExpirationStatus(xtv)
+ }
+
+ case strings.EqualFold("Transition", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTransitionListUnwrapped(&sv.Transitions, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleRuleAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleRuleAndOperator
+ if *v == nil {
+ sv = &types.LifecycleRuleAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.ObjectSizeGreaterThan = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("ObjectSizeLessThan", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.ObjectSizeLessThan = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRuleFilter(v **types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LifecycleRuleFilter
+ if *v == nil {
+ sv = &types.LifecycleRuleFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&sv.And, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.ObjectSizeGreaterThan = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("ObjectSizeLessThan", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.ObjectSizeLessThan = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRules(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.LifecycleRule
+ if *v == nil {
+ sv = make([]types.LifecycleRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.LifecycleRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLifecycleRulesUnwrapped(v *[]types.LifecycleRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.LifecycleRule
+ if *v == nil {
+ sv = make([]types.LifecycleRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.LifecycleRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentLifecycleRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.LoggingEnabled
+ if *v == nil {
+ sv = &types.LoggingEnabled{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TargetBucket", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TargetBucket = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("TargetGrants", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTargetGrants(&sv.TargetGrants, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("TargetObjectKeyFormat", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTargetObjectKeyFormat(&sv.TargetObjectKeyFormat, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("TargetPrefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TargetPrefix = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetadataTableConfigurationResult(v **types.MetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MetadataTableConfigurationResult
+ if *v == nil {
+ sv = &types.MetadataTableConfigurationResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3TablesDestinationResult", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentS3TablesDestinationResult(&sv.S3TablesDestinationResult, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Metrics
+ if *v == nil {
+ sv = &types.Metrics{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("EventThreshold", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.EventThreshold, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.MetricsStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsAndOperator(v **types.MetricsAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MetricsAndOperator
+ if *v == nil {
+ sv = &types.MetricsAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessPointArn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessPointArn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsConfiguration(v **types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MetricsConfiguration
+ if *v == nil {
+ sv = &types.MetricsConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentMetricsFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsConfigurationList(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.MetricsConfiguration
+ if *v == nil {
+ sv = make([]types.MetricsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.MetricsConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMetricsConfigurationListUnwrapped(v *[]types.MetricsConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.MetricsConfiguration
+ if *v == nil {
+ sv = make([]types.MetricsConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.MetricsConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentMetricsConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
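+// MetricsFilter is a tagged union rather than a struct: the first recognized
+// child element sets the variant, memberFound makes any later siblings get
+// skipped, and an unrecognized member is preserved as types.UnknownUnionMember
+// instead of failing the decode.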
+func awsRestxml_deserializeDocumentMetricsFilter(v *types.MetricsFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var uv types.MetricsFilter
+ var memberFound bool
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ if memberFound {
+ if err = decoder.Decoder.Skip(); err != nil {
+ return err
+ }
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessPointArn", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.MetricsFilterMemberAccessPointArn{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("And", t.Name.Local):
+ var mv types.MetricsAndOperator
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentMetricsAndOperator(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.MetricsFilterMemberAnd{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ var mv string
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ mv = xtv
+ }
+ uv = &types.MetricsFilterMemberPrefix{Value: mv}
+ memberFound = true
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ var mv types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ uv = &types.MetricsFilterMemberTag{Value: mv}
+ memberFound = true
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: t.Name.Local}
+ memberFound = true
+
+ }
+ decoder = originalDecoder
+ }
+ *v = uv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MultipartUpload
+ if *v == nil {
+ sv = &types.MultipartUpload{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumAlgorithm", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv)
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ case strings.EqualFold("Initiated", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Initiated = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Initiator", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.StorageClass(xtv)
+ }
+
+ case strings.EqualFold("UploadId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UploadId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMultipartUploadList(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.MultipartUpload
+ if *v == nil {
+ sv = make([]types.MultipartUpload, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.MultipartUpload
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentMultipartUploadListUnwrapped(v *[]types.MultipartUpload, decoder smithyxml.NodeDecoder) error {
+ var sv []types.MultipartUpload
+ if *v == nil {
+ sv = make([]types.MultipartUpload, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.MultipartUpload
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentMultipartUpload(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.NoncurrentVersionExpiration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoncurrentVersionExpiration
+ if *v == nil {
+ sv = &types.NoncurrentVersionExpiration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.NewerNoncurrentVersions = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("NoncurrentDays", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.NoncurrentDays = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoncurrentVersionTransition
+ if *v == nil {
+ sv = &types.NoncurrentVersionTransition{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("NewerNoncurrentVersions", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.NewerNoncurrentVersions = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("NoncurrentDays", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.NoncurrentDays = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.TransitionStorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionTransitionList(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.NoncurrentVersionTransition
+ if *v == nil {
+ sv = make([]types.NoncurrentVersionTransition, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.NoncurrentVersionTransition
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoncurrentVersionTransitionListUnwrapped(v *[]types.NoncurrentVersionTransition, decoder smithyxml.NodeDecoder) error {
+ var sv []types.NoncurrentVersionTransition
+ if *v == nil {
+ sv = make([]types.NoncurrentVersionTransition, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.NoncurrentVersionTransition
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentNoncurrentVersionTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoSuchBucket(v **types.NoSuchBucket, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoSuchBucket
+ if *v == nil {
+ sv = &types.NoSuchBucket{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoSuchKey(v **types.NoSuchKey, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoSuchKey
+ if *v == nil {
+ sv = &types.NoSuchKey{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNoSuchUpload(v **types.NoSuchUpload, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NoSuchUpload
+ if *v == nil {
+ sv = &types.NoSuchUpload{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentNotFound(v **types.NotFound, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NotFound
+ if *v == nil {
+ sv = &types.NotFound{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
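+// The filter's <S3Key> element maps onto the Go field named Key, another spot
+// where the wire name and the modeled member name differ.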
+func awsRestxml_deserializeDocumentNotificationConfigurationFilter(v **types.NotificationConfigurationFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.NotificationConfigurationFilter
+ if *v == nil {
+ sv = &types.NotificationConfigurationFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("S3Key", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentS3KeyFilter(&sv.Key, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
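+// Object decodes a flattened ChecksumAlgorithm list (one value per repeated
+// element), a LastModified timestamp, and a 64-bit Size, alongside the nested
+// Owner and RestoreStatus documents.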
+func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Object
+ if *v == nil {
+ sv = &types.Object{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumAlgorithm", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("RestoreStatus", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.ObjectStorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectAlreadyInActiveTierError(v **types.ObjectAlreadyInActiveTierError, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectAlreadyInActiveTierError
+ if *v == nil {
+ sv = &types.ObjectAlreadyInActiveTierError{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectList(v *[]types.Object, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Object
+ if *v == nil {
+ sv = make([]types.Object, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Object
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
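+// The Unwrapped variant handles flattened XML lists, where repeated elements
+// appear directly under the parent rather than inside <member> wrappers: each
+// call decodes the single element currently held by the NodeDecoder and
+// appends it, so the caller's own token loop drives the iteration.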
+func awsRestxml_deserializeDocumentObjectListUnwrapped(v *[]types.Object, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Object
+ if *v == nil {
+ sv = make([]types.Object, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Object
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentObject(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentObjectLockConfiguration(v **types.ObjectLockConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockConfiguration
+ if *v == nil {
+ sv = &types.ObjectLockConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectLockEnabled", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ObjectLockEnabled = types.ObjectLockEnabled(xtv)
+ }
+
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentObjectLockRule(&sv.Rule, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectLockLegalHold(v **types.ObjectLockLegalHold, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockLegalHold
+ if *v == nil {
+ sv = &types.ObjectLockLegalHold{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ObjectLockLegalHoldStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectLockRetention(v **types.ObjectLockRetention, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockRetention
+ if *v == nil {
+ sv = &types.ObjectLockRetention{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Mode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Mode = types.ObjectLockRetentionMode(xtv)
+ }
+
+ case strings.EqualFold("RetainUntilDate", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.RetainUntilDate = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectLockRule(v **types.ObjectLockRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectLockRule
+ if *v == nil {
+ sv = &types.ObjectLockRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DefaultRetention", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDefaultRetention(&sv.DefaultRetention, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectNotInActiveTierError(v **types.ObjectNotInActiveTierError, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectNotInActiveTierError
+ if *v == nil {
+ sv = &types.ObjectNotInActiveTierError{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectPart
+ if *v == nil {
+ sv = &types.ObjectPart{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32C = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC64NVME = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA1 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA256 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("PartNumber", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PartNumber = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = ptr.Int64(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ObjectVersion
+ if *v == nil {
+ sv = &types.ObjectVersion{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumAlgorithm", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentChecksumAlgorithmListUnwrapped(&sv.ChecksumAlgorithm, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ChecksumType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumType = types.ChecksumType(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("IsLatest", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val)
+ }
+ sv.IsLatest = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Owner", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("RestoreStatus", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = ptr.Int64(i64)
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.ObjectVersionStorageClass(xtv)
+ }
+
+ case strings.EqualFold("VersionId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.VersionId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersionList(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ObjectVersion
+ if *v == nil {
+ sv = make([]types.ObjectVersion, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ObjectVersion
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentObjectVersionListUnwrapped(v *[]types.ObjectVersion, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ObjectVersion
+ if *v == nil {
+ sv = make([]types.ObjectVersion, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ObjectVersion
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentObjectVersion(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentOwner(v **types.Owner, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Owner
+ if *v == nil {
+ sv = &types.Owner{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DisplayName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DisplayName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControls(v **types.OwnershipControls, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.OwnershipControls
+ if *v == nil {
+ sv = &types.OwnershipControls{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRule(v **types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.OwnershipControlsRule
+ if *v == nil {
+ sv = &types.OwnershipControlsRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ObjectOwnership", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ObjectOwnership = types.ObjectOwnership(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRules(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.OwnershipControlsRule
+ if *v == nil {
+ sv = make([]types.OwnershipControlsRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.OwnershipControlsRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentOwnershipControlsRulesUnwrapped(v *[]types.OwnershipControlsRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.OwnershipControlsRule
+ if *v == nil {
+ sv = make([]types.OwnershipControlsRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.OwnershipControlsRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentOwnershipControlsRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
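+
+// Part mirrors ObjectPart but additionally carries ETag and LastModified.
+// PartNumber is parsed as a base-10 int64 and then narrowed to int32 to
+// match the field type.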
+func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Part
+ if *v == nil {
+ sv = &types.Part{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ChecksumCRC32", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC32C", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC32C = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumCRC64NVME = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA1", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA1 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ChecksumSHA256", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ChecksumSHA256 = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ETag", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ETag = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("LastModified", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.LastModified = ptr.Time(t)
+ }
+
+ case strings.EqualFold("PartNumber", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PartNumber = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Size", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Size = ptr.Int64(i64)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentPartitionedPrefix(v **types.PartitionedPrefix, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PartitionedPrefix
+ if *v == nil {
+ sv = &types.PartitionedPrefix{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("PartitionDateSource", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.PartitionDateSource = types.PartitionDateSource(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentParts(v *[]types.Part, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Part
+ if *v == nil {
+ sv = make([]types.Part, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Part
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentPartsUnwrapped(v *[]types.Part, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Part
+ if *v == nil {
+ sv = make([]types.Part, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Part
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentPart(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentPartsList(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ObjectPart
+ if *v == nil {
+ sv = make([]types.ObjectPart, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ObjectPart
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentPartsListUnwrapped(v *[]types.ObjectPart, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ObjectPart
+ if *v == nil {
+ sv = make([]types.ObjectPart, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ObjectPart
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentObjectPart(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PolicyStatus
+ if *v == nil {
+ sv = &types.PolicyStatus{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IsPublic", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val)
+ }
+ sv.IsPublic = ptr.Bool(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
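+// All four public-access flags are decoded with strconv.ParseBool; the error
+// messages refer to "Setting" because the flags appear to share a single
+// boolean shape in the service model rather than one shape per field.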
+func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.PublicAccessBlockConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PublicAccessBlockConfiguration
+ if *v == nil {
+ sv = &types.PublicAccessBlockConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("BlockPublicAcls", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.BlockPublicAcls = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("BlockPublicPolicy", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.BlockPublicPolicy = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("IgnorePublicAcls", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.IgnorePublicAcls = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("RestrictPublicBuckets", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val)
+ }
+ sv.RestrictPublicBuckets = ptr.Bool(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
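+// Note the element-to-field renames here: the XML element <Queue> populates
+// the QueueArn field, and repeated <Event> elements accumulate into Events
+// via the unwrapped event-list deserializer.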
+func awsRestxml_deserializeDocumentQueueConfiguration(v **types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.QueueConfiguration
+ if *v == nil {
+ sv = &types.QueueConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Event", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Queue", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.QueueArn = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfigurationList(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.QueueConfiguration
+ if *v == nil {
+ sv = make([]types.QueueConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.QueueConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentQueueConfigurationListUnwrapped(v *[]types.QueueConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.QueueConfiguration
+ if *v == nil {
+ sv = make([]types.QueueConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.QueueConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentQueueConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentRedirect(v **types.Redirect, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Redirect
+ if *v == nil {
+ sv = &types.Redirect{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("HostName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HostName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("HttpRedirectCode", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HttpRedirectCode = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Protocol", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Protocol = types.Protocol(xtv)
+ }
+
+ case strings.EqualFold("ReplaceKeyPrefixWith", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ReplaceKeyPrefixWith = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("ReplaceKeyWith", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ReplaceKeyWith = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRedirectAllRequestsTo(v **types.RedirectAllRequestsTo, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RedirectAllRequestsTo
+ if *v == nil {
+ sv = &types.RedirectAllRequestsTo{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("HostName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.HostName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Protocol", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Protocol = types.Protocol(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicaModifications(v **types.ReplicaModifications, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicaModifications
+ if *v == nil {
+ sv = &types.ReplicaModifications{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ReplicaModificationsStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationConfiguration(v **types.ReplicationConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationConfiguration
+ if *v == nil {
+ sv = &types.ReplicationConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Role", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Role = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
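+// ReplicationRule decodes both the legacy top-level Prefix element
+// (deprecated in the S3 API in favor of Filter) and the newer Filter
+// element, alongside the nested DeleteMarkerReplication, Destination,
+// ExistingObjectReplication, and SourceSelectionCriteria documents.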
+func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationRule
+ if *v == nil {
+ sv = &types.ReplicationRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DeleteMarkerReplication", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDeleteMarkerReplication(&sv.DeleteMarkerReplication, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Destination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentDestination(&sv.Destination, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ExistingObjectReplication", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentExistingObjectReplication(&sv.ExistingObjectReplication, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationRuleFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("ID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.ID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Priority", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Priority = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("SourceSelectionCriteria", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSourceSelectionCriteria(&sv.SourceSelectionCriteria, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ReplicationRuleStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.ReplicationRuleAndOperator, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationRuleAndOperator
+ if *v == nil {
+ sv = &types.ReplicationRuleAndOperator{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTagSetUnwrapped(&sv.Tags, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRuleFilter(v **types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationRuleFilter
+ if *v == nil {
+ sv = &types.ReplicationRuleFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("And", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&sv.And, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Prefix", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Prefix = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Tag", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRules(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ReplicationRule
+ if *v == nil {
+ sv = make([]types.ReplicationRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ReplicationRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationRulesUnwrapped(v *[]types.ReplicationRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ReplicationRule
+ if *v == nil {
+ sv = make([]types.ReplicationRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ReplicationRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentReplicationRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentReplicationTime(v **types.ReplicationTime, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationTime
+ if *v == nil {
+ sv = &types.ReplicationTime{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.ReplicationTimeStatus(xtv)
+ }
+
+ case strings.EqualFold("Time", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicationTimeValue(&sv.Time, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTimeValue, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ReplicationTimeValue
+ if *v == nil {
+ sv = &types.ReplicationTimeValue{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Minutes", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Minutes = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRestoreStatus(v **types.RestoreStatus, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RestoreStatus
+ if *v == nil {
+ sv = &types.RestoreStatus{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("IsRestoreInProgress", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected IsRestoreInProgress to be of type *bool, got %T instead", val)
+ }
+ sv.IsRestoreInProgress = ptr.Bool(xtv)
+ }
+
+ case strings.EqualFold("RestoreExpiryDate", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.RestoreExpiryDate = ptr.Time(t)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRule(v **types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RoutingRule
+ if *v == nil {
+ sv = &types.RoutingRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Condition", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentCondition(&sv.Condition, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Redirect", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentRedirect(&sv.Redirect, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
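+// Unlike the other list deserializers above, RoutingRules matches child
+// elements named <RoutingRule> rather than <member>, reflecting the custom
+// element name used in the website-configuration XML.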
+func awsRestxml_deserializeDocumentRoutingRules(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.RoutingRule
+ if *v == nil {
+ sv = make([]types.RoutingRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("RoutingRule", t.Name.Local):
+ var col types.RoutingRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentRoutingRulesUnwrapped(v *[]types.RoutingRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.RoutingRule
+ if *v == nil {
+ sv = make([]types.RoutingRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.RoutingRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentRoutingRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.S3KeyFilter
+ if *v == nil {
+ sv = &types.S3KeyFilter{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("FilterRule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentFilterRuleListUnwrapped(&sv.FilterRules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentS3TablesDestinationResult(v **types.S3TablesDestinationResult, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.S3TablesDestinationResult
+ if *v == nil {
+ sv = &types.S3TablesDestinationResult{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("TableArn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TableArn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("TableBucketArn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TableBucketArn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("TableName", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TableName = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("TableNamespace", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TableNamespace = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ServerSideEncryptionByDefault
+ if *v == nil {
+ sv = &types.ServerSideEncryptionByDefault{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("KMSMasterKeyID", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KMSMasterKeyID = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SSEAlgorithm", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SSEAlgorithm = types.ServerSideEncryption(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionConfiguration(v **types.ServerSideEncryptionConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ServerSideEncryptionConfiguration
+ if *v == nil {
+ sv = &types.ServerSideEncryptionConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Rule", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(&sv.Rules, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ServerSideEncryptionRule
+ if *v == nil {
+ sv = &types.ServerSideEncryptionRule{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ApplyServerSideEncryptionByDefault", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionByDefault(&sv.ApplyServerSideEncryptionByDefault, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("BucketKeyEnabled", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv, err := strconv.ParseBool(string(val))
+ if err != nil {
+ return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val)
+ }
+ sv.BucketKeyEnabled = ptr.Bool(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRules(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.ServerSideEncryptionRule
+ if *v == nil {
+ sv = make([]types.ServerSideEncryptionRule, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.ServerSideEncryptionRule
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types.ServerSideEncryptionRule, decoder smithyxml.NodeDecoder) error {
+ var sv []types.ServerSideEncryptionRule
+ if *v == nil {
+ sv = make([]types.ServerSideEncryptionRule, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.ServerSideEncryptionRule
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentServerSideEncryptionRule(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
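+
+// Note: the wrapped/unwrapped pair above is the standard smithy-go pattern for
+// decoding XML lists. The wrapped variant consumes a container element and
+// decodes each "member" child, while the unwrapped variant is invoked once per
+// flattened element (here, each <Rule> directly under
+// ServerSideEncryptionConfiguration) and appends a single entry per call. As a
+// rough sketch of the two shapes (<Wrapped> is a placeholder name, not an S3
+// element):
+//
+//	<Wrapped><member>...</member><member>...</member></Wrapped>
+//	<Rule>...</Rule><Rule>...</Rule>
+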
+func awsRestxml_deserializeDocumentSessionCredentials(v **types.SessionCredentials, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SessionCredentials
+ if *v == nil {
+ sv = &types.SessionCredentials{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessKeyId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessKeyId = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Expiration", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Expiration = ptr.Time(t)
+ }
+
+ case strings.EqualFold("SecretAccessKey", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SecretAccessKey = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SessionToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SessionToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSimplePrefix(v **types.SimplePrefix, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SimplePrefix
+ if *v == nil {
+ sv = &types.SimplePrefix{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SourceSelectionCriteria
+ if *v == nil {
+ sv = &types.SourceSelectionCriteria{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("ReplicaModifications", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentReplicaModifications(&sv.ReplicaModifications, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SseKmsEncryptedObjects", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSseKmsEncryptedObjects(&sv.SseKmsEncryptedObjects, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSSEKMS(v **types.SSEKMS, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SSEKMS
+ if *v == nil {
+ sv = &types.SSEKMS{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("KeyId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.KeyId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSseKmsEncryptedObjects(v **types.SseKmsEncryptedObjects, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SseKmsEncryptedObjects
+ if *v == nil {
+ sv = &types.SseKmsEncryptedObjects{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Status", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Status = types.SseKmsEncryptedObjectsStatus(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentSSES3(v **types.SSES3, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.SSES3
+ if *v == nil {
+ sv = &types.SSES3{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentStorageClassAnalysis(v **types.StorageClassAnalysis, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.StorageClassAnalysis
+ if *v == nil {
+ sv = &types.StorageClassAnalysis{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DataExport", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(&sv.DataExport, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentStorageClassAnalysisDataExport(v **types.StorageClassAnalysisDataExport, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.StorageClassAnalysisDataExport
+ if *v == nil {
+ sv = &types.StorageClassAnalysisDataExport{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Destination", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentAnalyticsExportDestination(&sv.Destination, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("OutputSchemaVersion", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.OutputSchemaVersion = types.StorageClassAnalysisSchemaVersion(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTag(v **types.Tag, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Tag
+ if *v == nil {
+ sv = &types.Tag{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Key", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Key = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Value", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Value = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTagSet(v *[]types.Tag, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Tag
+ if *v == nil {
+ sv = make([]types.Tag, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Tag", t.Name.Local):
+ var col types.Tag
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTagSetUnwrapped(v *[]types.Tag, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Tag
+ if *v == nil {
+ sv = make([]types.Tag, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Tag
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTargetGrant(v **types.TargetGrant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.TargetGrant
+ if *v == nil {
+ sv = &types.TargetGrant{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Grantee", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentGrantee(&sv.Grantee, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Permission", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Permission = types.BucketLogsPermission(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTargetGrants(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.TargetGrant
+ if *v == nil {
+ sv = make([]types.TargetGrant, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("Grant", t.Name.Local):
+ var col types.TargetGrant
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, decoder smithyxml.NodeDecoder) error {
+ var sv []types.TargetGrant
+ if *v == nil {
+ sv = make([]types.TargetGrant, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.TargetGrant
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTargetGrant(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTargetObjectKeyFormat(v **types.TargetObjectKeyFormat, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.TargetObjectKeyFormat
+ if *v == nil {
+ sv = &types.TargetObjectKeyFormat{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("PartitionedPrefix", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentPartitionedPrefix(&sv.PartitionedPrefix, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SimplePrefix", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentSimplePrefix(&sv.SimplePrefix, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Tiering
+ if *v == nil {
+ sv = &types.Tiering{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessTier", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessTier = types.IntelligentTieringAccessTier(xtv)
+ }
+
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTieringList(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Tiering
+ if *v == nil {
+ sv = make([]types.Tiering, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Tiering
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Tiering
+ if *v == nil {
+ sv = make([]types.Tiering, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Tiering
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTiering(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTooManyParts(v **types.TooManyParts, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.TooManyParts
+ if *v == nil {
+ sv = &types.TooManyParts{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.TopicConfiguration
+ if *v == nil {
+ sv = &types.TopicConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Event", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentEventListUnwrapped(&sv.Events, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Filter", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsRestxml_deserializeDocumentNotificationConfigurationFilter(&sv.Filter, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Id", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Id = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Topic", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.TopicArn = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTopicConfigurationList(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.TopicConfiguration
+ if *v == nil {
+ sv = make([]types.TopicConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.TopicConfiguration
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTopicConfigurationListUnwrapped(v *[]types.TopicConfiguration, decoder smithyxml.NodeDecoder) error {
+ var sv []types.TopicConfiguration
+ if *v == nil {
+ sv = make([]types.TopicConfiguration, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.TopicConfiguration
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTopicConfiguration(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
+func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Transition
+ if *v == nil {
+ sv = &types.Transition{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Date", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Date = ptr.Time(t)
+ }
+
+ case strings.EqualFold("Days", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.Days = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("StorageClass", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.StorageClass = types.TransitionStorageClass(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTransitionList(v *[]types.Transition, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv []types.Transition
+ if *v == nil {
+ sv = make([]types.Transition, 0)
+ } else {
+ sv = *v
+ }
+
+ originalDecoder := decoder
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ switch {
+ case strings.EqualFold("member", t.Name.Local):
+ var col types.Transition
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &col
+ if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ col = *destAddr
+ sv = append(sv, col)
+
+ default:
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestxml_deserializeDocumentTransitionListUnwrapped(v *[]types.Transition, decoder smithyxml.NodeDecoder) error {
+ var sv []types.Transition
+ if *v == nil {
+ sv = make([]types.Transition, 0)
+ } else {
+ sv = *v
+ }
+
+ switch {
+ default:
+ var mv types.Transition
+ t := decoder.StartEl
+ _ = t
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ destAddr := &mv
+ if err := awsRestxml_deserializeDocumentTransition(&destAddr, nodeDecoder); err != nil {
+ return err
+ }
+ mv = *destAddr
+ sv = append(sv, mv)
+ }
+ *v = sv
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go
new file mode 100644
index 000000000..d825a41a7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/doc.go
@@ -0,0 +1,5 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package s3 provides the API client, operations, and parameter types for Amazon
+// Simple Storage Service.
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go
new file mode 100644
index 000000000..bb5f4cf47
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go
@@ -0,0 +1,126 @@
+package s3
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ smithyauth "github.com/aws/smithy-go/auth"
+)
+
+type endpointAuthResolver struct {
+ EndpointResolver EndpointResolverV2
+}
+
+var _ AuthSchemeResolver = (*endpointAuthResolver)(nil)
+
+func (r *endpointAuthResolver) ResolveAuthSchemes(
+ ctx context.Context, params *AuthResolverParameters,
+) (
+ []*smithyauth.Option, error,
+) {
+ if params.endpointParams.Region == nil {
+ // #2502: We're correcting the endpoint binding behavior to treat empty
+ // Region as "unset" (nil), but auth resolution technically doesn't
+ // care and someone could be using V1 or non-default V2 endpoint
+ // resolution, both of which would bypass the required-region check.
+ // They shouldn't be broken because the region is technically required
+ // by this service's endpoint-based auth resolver, so we stub it here.
+ params.endpointParams.Region = aws.String("")
+ }
+
+ opts, err := r.resolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ // canonicalize sigv4-s3express ID
+ for _, opt := range opts {
+ if opt.SchemeID == "sigv4-s3express" {
+ opt.SchemeID = "com.amazonaws.s3#sigv4express"
+ }
+ }
+
+ // preserve pre-SRA behavior where everything technically had anonymous
+ return append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ }), nil
+}
+
+func (r *endpointAuthResolver) resolveAuthSchemes(
+ ctx context.Context, params *AuthResolverParameters,
+) (
+ []*smithyauth.Option, error,
+) {
+ baseOpts, err := (&defaultAuthSchemeResolver{}).ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, fmt.Errorf("get base options: %w", err)
+ }
+
+ endpt, err := r.EndpointResolver.ResolveEndpoint(ctx, *params.endpointParams)
+ if err != nil {
+ return nil, fmt.Errorf("resolve endpoint: %w", err)
+ }
+
+ endptOpts, ok := smithyauth.GetAuthOptions(&endpt.Properties)
+ if !ok {
+ return baseOpts, nil
+ }
+
+	// the list of options from the endpoint is authoritative; however, the
+	// modeled options have some properties that the endpoint ones don't, so we
+	// start from the latter and merge in
+ for _, endptOpt := range endptOpts {
+ if baseOpt := findScheme(baseOpts, endptOpt.SchemeID); baseOpt != nil {
+ rebaseProps(endptOpt, baseOpt)
+ }
+ }
+
+ return endptOpts, nil
+}
+
+// rebase the properties of dst, taking src as the base and overlaying those
+// from dst
+func rebaseProps(dst, src *smithyauth.Option) {
+ iprops, sprops := src.IdentityProperties, src.SignerProperties
+
+ iprops.SetAll(&dst.IdentityProperties)
+ sprops.SetAll(&dst.SignerProperties)
+
+ dst.IdentityProperties = iprops
+ dst.SignerProperties = sprops
+}
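+
+// As an illustrative sketch (not a case taken from the SDK): if src carries
+// {signingName: "s3"} and dst carries {signingName: "s3express"}, the merged
+// dst keeps "s3express" while also inheriting any properties that only src set.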
+
+func findScheme(opts []*smithyauth.Option, schemeID string) *smithyauth.Option {
+ for _, opt := range opts {
+ if opt.SchemeID == schemeID {
+ return opt
+ }
+ }
+ return nil
+}
+
+func finalizeServiceEndpointAuthResolver(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &endpointAuthResolver{
+ EndpointResolver: options.EndpointResolverV2,
+ }
+}
+
+func finalizeOperationEndpointAuthResolver(options *Options) {
+ resolver, ok := options.AuthSchemeResolver.(*endpointAuthResolver)
+ if !ok {
+ return
+ }
+
+ if resolver.EndpointResolver == options.EndpointResolverV2 {
+ return
+ }
+
+ options.AuthSchemeResolver = &endpointAuthResolver{
+ EndpointResolver: options.EndpointResolverV2,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go
new file mode 100644
index 000000000..1a4af2822
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go
@@ -0,0 +1,6423 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
+ "github.com/aws/smithy-go/endpoints/private/rulesfn"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
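+
+// As a usage sketch (cfg and the URL are placeholders, not values from this
+// package), a client can be pointed at an S3-compatible server through this
+// resolver:
+//
+//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		o.EndpointResolver = s3.EndpointResolverFromURL("https://storage.example.com")
+//	})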
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ eo := m.Options
+ eo.Logger = middleware.GetLogger(ctx)
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+ if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "s3"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+ var resolver aws.EndpointResolverWithOptions
+
+ if awsResolverWithOptions != nil {
+ resolver = awsResolverWithOptions
+ } else if awsResolver != nil {
+ resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+ }
+
+ return &wrappedEndpointResolver{
+ awsResolver: resolver,
+ }
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+ options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+ if len(options.EndpointOptions.ResolvedRegion) == 0 {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(options.Region, fipsInfix) ||
+ strings.Contains(options.Region, fipsPrefix) ||
+ strings.Contains(options.Region, fipsSuffix) {
+ options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+ options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+ options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ }
+ }
+
+ if options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+ if options.UseDualstack {
+ options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled
+ } else {
+ options.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled
+ }
+ }
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_S3")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "S3", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The S3 bucket used to send the request. This is an optional parameter that will
+ // be set automatically for operations that are scoped to an S3 bucket.
+ //
+ // Parameter
+ // is required.
+ Bucket *string
+
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+
+ // When true, force a path-style endpoint to be used where the bucket name is part
+ // of the path.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::S3::ForcePathStyle
+ ForcePathStyle *bool
+
+ // When true, use S3 Accelerate. NOTE: Not all regions support S3
+ // accelerate.
+ //
+ // Defaults to false if no value is provided.
+ //
+ // AWS::S3::Accelerate
+ Accelerate *bool
+
+	// Whether the global endpoint should be used, rather than the regional endpoint
+ // for us-east-1.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::S3::UseGlobalEndpoint
+ UseGlobalEndpoint *bool
+
+ // Internal parameter to use object lambda endpoint for an operation (eg:
+ // WriteGetObjectResponse)
+ //
+ // Parameter is required.
+ UseObjectLambdaEndpoint *bool
+
+ // The S3 Key used to send the request. This is an optional parameter that will be
+ // set automatically for operations that are scoped to an S3 Key.
+ //
+ // Parameter is
+ // required.
+ Key *string
+
+ // The S3 Prefix used to send the request. This is an optional parameter that will
+ // be set automatically for operations that are scoped to an S3 Prefix.
+ //
+ // Parameter
+ // is required.
+ Prefix *string
+
+ // The Copy Source used for Copy Object request. This is an optional parameter that
+ // will be set automatically for operations that are scoped to Copy
+ // Source.
+ //
+ // Parameter is required.
+ CopySource *string
+
+ // Internal parameter to disable Access Point Buckets
+ //
+ // Parameter is required.
+ DisableAccessPoints *bool
+
+ // Whether multi-region access points (MRAP) should be disabled.
+ //
+ // Defaults to false
+ // if no value is provided.
+ //
+ // AWS::S3::DisableMultiRegionAccessPoints
+ DisableMultiRegionAccessPoints *bool
+
+ // When an Access Point ARN is provided and this flag is enabled, the SDK MUST use
+ // the ARN's region when constructing the endpoint instead of the client's
+ // configured region.
+ //
+ // Parameter is required.
+ //
+ // AWS::S3::UseArnRegion
+ UseArnRegion *bool
+
+ // Internal parameter to indicate whether S3Express operation should use control
+ // plane, (ex. CreateBucket)
+ //
+ // Parameter is required.
+ UseS3ExpressControlEndpoint *bool
+
+ // Parameter to indicate whether S3Express session auth should be
+ // disabled
+ //
+ // Parameter is required.
+ DisableS3ExpressSessionAuth *bool
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.Accelerate == nil {
+ return fmt.Errorf("parameter Accelerate is required")
+ }
+
+ if p.DisableMultiRegionAccessPoints == nil {
+ return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required")
+ }
+
+ if p.ForcePathStyle == nil {
+ return fmt.Errorf("parameter ForcePathStyle is required")
+ }
+
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ if p.UseGlobalEndpoint == nil {
+ return fmt.Errorf("parameter UseGlobalEndpoint is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.Accelerate == nil {
+ p.Accelerate = ptr.Bool(false)
+ }
+
+ if p.DisableMultiRegionAccessPoints == nil {
+ p.DisableMultiRegionAccessPoints = ptr.Bool(false)
+ }
+
+ if p.ForcePathStyle == nil {
+ p.ForcePathStyle = ptr.Bool(false)
+ }
+
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+
+ if p.UseGlobalEndpoint == nil {
+ p.UseGlobalEndpoint = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
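+
+// As an illustrative sketch (the region value is a placeholder): the resolver
+// can be exercised directly, and any parameter left nil falls back to the
+// defaults that WithDefaults applies before rule evaluation:
+//
+//	ep, err := NewDefaultEndpointResolverV2().ResolveEndpoint(ctx, EndpointParameters{
+//		Region: aws.String("us-east-1"),
+//	})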
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseFIPS := *params.UseFIPS
+ _UseDualStack := *params.UseDualStack
+ _ForcePathStyle := *params.ForcePathStyle
+ _Accelerate := *params.Accelerate
+ _UseGlobalEndpoint := *params.UseGlobalEndpoint
+ _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints
+
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if _Accelerate == true {
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS")
+ }
+ }
+ if _UseDualStack == true {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.")
+ }
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS")
+ }
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate")
+ }
+ }
+ if _UseFIPS == true {
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if _partitionResult.Name == "aws-cn" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS")
+ }
+ }
+ }
+ if exprVal := params.Bucket; exprVal != nil {
+ _Bucket := *exprVal
+ _ = _Bucket
+ if exprVal := rulesfn.SubString(_Bucket, 0, 6, true); exprVal != nil {
+ _bucketSuffix := *exprVal
+ _ = _bucketSuffix
+ if _bucketSuffix == "--x-s3" {
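+					// S3 Express (directory) bucket names are expected to follow
+					// "<base>--<zone-id>--x-s3". SubString with reverse=true indexes from
+					// the end of the string, so the 0..6 probe above matched the
+					// 6-character "--x-s3" suffix, and the 6..14, 6..15, and 6..19 probes
+					// further below try zone-id lengths of 8, 9, and 13 characters, each
+					// confirmed by a trailing "--" delimiter check.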
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.")
+ }
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.")
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil {
+ _DisableS3ExpressSessionAuth := *exprVal
+ _ = _DisableS3ExpressSessionAuth
+ if _DisableS3ExpressSessionAuth == true {
+ if _url.IsIp == true {
+ _uri_encoded_bucket := rulesfn.URIEncode(_Bucket)
+ _ = _uri_encoded_bucket
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.")
+ }
+ }
+ if _url.IsIp == true {
+ _uri_encoded_bucket := rulesfn.URIEncode(_Bucket)
+ _ = _uri_encoded_bucket
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.")
+ }
+ }
+ if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil {
+ _UseS3ExpressControlEndpoint := *exprVal
+ _ = _UseS3ExpressControlEndpoint
+ if _UseS3ExpressControlEndpoint == true {
+ _uri_encoded_bucket := rulesfn.URIEncode(_Bucket)
+ _ = _uri_encoded_bucket
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3express-control-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3express-control.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ }
+ if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) {
+ if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil {
+ _DisableS3ExpressSessionAuth := *exprVal
+ _ = _DisableS3ExpressSessionAuth
+ if _DisableS3ExpressSessionAuth == true {
+ if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.")
+ }
+ }
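+ // Session-auth path: the same reverse-offset probes as above, but the
+ // resolved endpoints advertise the "sigv4-s3express" scheme so requests are
+ // signed with S3 Express session credentials.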
+ if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil {
+ _s3expressAvailabilityZoneId := *exprVal
+ _ = _s3expressAvailabilityZoneId
+ if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil {
+ _s3expressAvailabilityZoneDelim := *exprVal
+ _ = _s3expressAvailabilityZoneDelim
+ if _s3expressAvailabilityZoneDelim == "--" {
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-fips-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3express-")
+ out.WriteString(_s3expressAvailabilityZoneId)
+ out.WriteString(".")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "sigv4-s3express",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.")
+ }
+ }
+ }
+ if !(params.Bucket != nil) {
+ if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil {
+ _UseS3ExpressControlEndpoint := *exprVal
+ _ = _UseS3ExpressControlEndpoint
+ if _UseS3ExpressControlEndpoint == true {
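+ // Bucket-less S3 Express control-plane requests honor a custom endpoint when
+ // one parses; otherwise they route to the regional
+ // s3express-control[-fips].<region> host.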
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3express-control-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3express-control.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ out.Set("backend", "S3Express")
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3express")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3express")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ if exprVal := params.Bucket; exprVal != nil {
+ _Bucket := *exprVal
+ _ = _Bucket
+ if exprVal := rulesfn.SubString(_Bucket, 49, 50, true); exprVal != nil {
+ _hardwareType := *exprVal
+ _ = _hardwareType
+ if exprVal := rulesfn.SubString(_Bucket, 8, 12, true); exprVal != nil {
+ _regionPrefix := *exprVal
+ _ = _regionPrefix
+ if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil {
+ _bucketAliasSuffix := *exprVal
+ _ = _bucketAliasSuffix
+ if exprVal := rulesfn.SubString(_Bucket, 32, 49, true); exprVal != nil {
+ _outpostId := *exprVal
+ _ = _outpostId
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _regionPartition := *exprVal
+ _ = _regionPartition
+ if _bucketAliasSuffix == "--op-s3" {
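+ // S3 on Outposts bucket alias (the name ends in "--op-s3"; the SubString
+ // calls above index from the end of the name). hardwareType "e" maps to the
+ // .ec2. endpoint and "o" to .op-<outpostId>.; a "beta" regionPrefix requires
+ // an explicit custom endpoint in both cases.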
+ if rulesfn.IsValidHostLabel(_outpostId, false) {
+ if _hardwareType == "e" {
+ if _regionPrefix == "beta" {
+ if !(params.Endpoint != nil) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found")
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".ec2.")
+ out.WriteString(_url.Authority)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".ec2.s3-outposts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_regionPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _hardwareType == "o" {
+ if _regionPrefix == "beta" {
+ if !(params.Endpoint != nil) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found")
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".op-")
+ out.WriteString(_outpostId)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".op-")
+ out.WriteString(_outpostId)
+ out.WriteString(".s3-outposts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_regionPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Unrecognized hardware type: \"Expected hardware type o or e but got ")
+ out.WriteString(_hardwareType)
+ out.WriteString("\"")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.")
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if exprVal := params.Bucket; exprVal != nil {
+ _Bucket := *exprVal
+ _ = _Bucket
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if !(rulesfn.ParseURL(_Endpoint) != nil) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Custom endpoint `")
+ out.WriteString(_Endpoint)
+ out.WriteString("` was not a valid URI")
+ return out.String()
+ }())
+ }
+ }
+ if _ForcePathStyle == false {
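+ // Virtual-hosted-style addressing: the bucket becomes a DNS label in the
+ // host. The cascade below enumerates every combination of FIPS, dual-stack,
+ // transfer acceleration, custom endpoint, and the aws-global/UseGlobalEndpoint
+ // settings, emitting the matching s3[-fips|-accelerate][.dualstack] host.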
+ if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) {
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if rulesfn.IsValidHostLabel(_Region, false) {
+ if _Accelerate == true {
+ if _partitionResult.Name == "aws-cn" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Accelerate cannot be used in this region")
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == true {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-fips.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == true {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-fips.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == true {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-fips.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == true {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-fips.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == true {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == true {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == false {
+ if _Accelerate == true {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == false {
+ if _Accelerate == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.dualstack.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == false {
+ if _Accelerate == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.dualstack.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.IsIp == true {
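+ // Custom endpoint whose host is an IP address: the bucket cannot be a DNS
+ // label, so it is appended to the path (host/<bucket>); the IsIp == false
+ // branches instead prepend it as a subdomain (<bucket>.host).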
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_Bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.IsIp == false {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.IsIp == true {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_Bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_Bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.IsIp == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.IsIp == true {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_Bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.IsIp == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
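+					// NOTE (editorial): the following branches resolve S3 Transfer
+					// Acceleration ("s3-accelerate") hosts. Accelerate hostnames carry no
+					// region label; the aws-global pseudo-region is signed as us-east-1,
+					// while all other regions sign with the client region.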
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == true {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3-accelerate.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
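+					// NOTE (editorial): standard virtual-hosted-style resolution follows.
+					// aws-global maps to "<bucket>.s3.<dnsSuffix>" signed for us-east-1;
+					// us-east-1 with UseGlobalEndpoint=true keeps the legacy global host,
+					// and every other region uses "<bucket>.s3.<region>.<dnsSuffix>".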
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if _UseFIPS == false {
+ if _Accelerate == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_Bucket)
+ out.WriteString(".s3.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ }
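+			// NOTE (editorial): a plain-HTTP custom endpoint can still use
+			// virtual-hosted addressing. The second argument to
+			// IsVirtualHostableS3Bucket appears to relax the label check for http,
+			// presumably because TLS wildcard-certificate matching is not a concern
+			// without HTTPS.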
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _url.Scheme == "http" {
+ if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, true) {
+ if _ForcePathStyle == false {
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if _Accelerate == false {
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if rulesfn.IsValidHostLabel(_Region, false) {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_Bucket)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
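+	// NOTE (editorial): when ForcePathStyle is false and the bucket parses as an
+	// ARN, resolution dispatches on the ARN's service component: S3 Object Lambda
+	// access points are handled first, then plain S3 access points (including
+	// multi-region access points), and finally S3 on Outposts.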
+ if _ForcePathStyle == false {
+ if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil {
+ _bucketArn := *exprVal
+ _ = _bucketArn
+ if exprVal := _bucketArn.ResourceId.Get(0); exprVal != nil {
+ _arnType := *exprVal
+ _ = _arnType
+ if !(_arnType == "") {
+ if _bucketArn.Service == "s3-object-lambda" {
+ if _arnType == "accesspoint" {
+ if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil {
+ _accessPointName := *exprVal
+ _ = _accessPointName
+ if !(_accessPointName == "") {
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack")
+ }
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate")
+ }
+ if !(_bucketArn.Region == "") {
+ if exprVal := params.DisableAccessPoints; exprVal != nil {
+ _DisableAccessPoints := *exprVal
+ _ = _DisableAccessPoints
+ if _DisableAccessPoints == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation")
+ }
+ }
+ if !(_bucketArn.ResourceId.Get(2) != nil) {
+ if exprVal := params.UseArnRegion; exprVal != nil {
+ _UseArnRegion := *exprVal
+ _ = _UseArnRegion
+ if _UseArnRegion == false {
+ if !(_bucketArn.Region == _Region) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid configuration: region from ARN `")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString("` does not match client region `")
+ out.WriteString(_Region)
+ out.WriteString("` and UseArnRegion is `false`")
+ return out.String()
+ }())
+ }
+ }
+ }
+ if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil {
+ _bucketPartition := *exprVal
+ _ = _bucketPartition
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if _bucketPartition.Name == _partitionResult.Name {
+ if rulesfn.IsValidHostLabel(_bucketArn.Region, true) {
+ if _bucketArn.AccountId == "" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Missing account id")
+ }
+ if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) {
+ if rulesfn.IsValidHostLabel(_accessPointName, false) {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".s3-object-lambda-fips.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".s3-object-lambda.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `")
+ out.WriteString(_accessPointName)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid region in ARN: `")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString("` (invalid DNS name)")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Client was configured for partition `")
+ out.WriteString(_partitionResult.Name)
+ out.WriteString("` but ARN (`")
+ out.WriteString(_Bucket)
+ out.WriteString("`) has `")
+ out.WriteString(_bucketPartition.Name)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: bucket ARN is missing a region")
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `")
+ out.WriteString(_arnType)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ if _arnType == "accesspoint" {
+ if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil {
+ _accessPointName := *exprVal
+ _ = _accessPointName
+ if !(_accessPointName == "") {
+ if !(_bucketArn.Region == "") {
+ if _arnType == "accesspoint" {
+ if !(_bucketArn.Region == "") {
+ if exprVal := params.DisableAccessPoints; exprVal != nil {
+ _DisableAccessPoints := *exprVal
+ _ = _DisableAccessPoints
+ if _DisableAccessPoints == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation")
+ }
+ }
+ if !(_bucketArn.ResourceId.Get(2) != nil) {
+ if exprVal := params.UseArnRegion; exprVal != nil {
+ _UseArnRegion := *exprVal
+ _ = _UseArnRegion
+ if _UseArnRegion == false {
+ if !(_bucketArn.Region == _Region) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid configuration: region from ARN `")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString("` does not match client region `")
+ out.WriteString(_Region)
+ out.WriteString("` and UseArnRegion is `false`")
+ return out.String()
+ }())
+ }
+ }
+ }
+ if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil {
+ _bucketPartition := *exprVal
+ _ = _bucketPartition
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if _bucketPartition.Name == _partitionResult.Name {
+ if rulesfn.IsValidHostLabel(_bucketArn.Region, true) {
+ if _bucketArn.Service == "s3" {
+ if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) {
+ if rulesfn.IsValidHostLabel(_accessPointName, false) {
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Access Points do not support S3 Accelerate")
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".s3-accesspoint-fips.dualstack.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".s3-accesspoint-fips.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".s3-accesspoint.dualstack.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".s3-accesspoint.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `")
+ out.WriteString(_accessPointName)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The ARN was not for the S3 service, found: ")
+ out.WriteString(_bucketArn.Service)
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid region in ARN: `")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString("` (invalid DNS name)")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Client was configured for partition `")
+ out.WriteString(_partitionResult.Name)
+ out.WriteString("` but ARN (`")
+ out.WriteString(_Bucket)
+ out.WriteString("`) has `")
+ out.WriteString(_bucketPartition.Name)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
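+	// NOTE (editorial): an access-point ARN whose region component is empty is
+	// treated as a multi-region access point (MRAP). MRAPs reject FIPS,
+	// dual-stack, and accelerate, resolve to
+	// "<name>.accesspoint.s3-global.<dnsSuffix>", and are signed with SigV4A
+	// over the wildcard region set {"*"}.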
+ if rulesfn.IsValidHostLabel(_accessPointName, true) {
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support dual-stack")
+ }
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support FIPS")
+ }
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support S3 Accelerate")
+ }
+ if _DisableMultiRegionAccessPoints == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid configuration: Multi-Region Access Point ARNs are disabled.")
+ }
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _mrapPartition := *exprVal
+ _ = _mrapPartition
+ if _mrapPartition.Name == _bucketArn.Partition {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString(".accesspoint.s3-global.")
+ out.WriteString(_mrapPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Client was configured for partition `")
+ out.WriteString(_mrapPartition.Name)
+ out.WriteString("` but bucket referred to partition `")
+ out.WriteString(_bucketArn.Partition)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Access Point Name")
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided")
+ }
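+	// NOTE (editorial): S3 on Outposts ARNs resolve to
+	// "<accessPoint>-<account>.<outpostId>.s3-outposts.<region>.<dnsSuffix>"
+	// (or to the outpost label on a custom endpoint) and advertise both SigV4A
+	// and SigV4 auth options with signing name "s3-outposts".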
+ if _bucketArn.Service == "s3-outposts" {
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support Dual-stack")
+ }
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support FIPS")
+ }
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate")
+ }
+ if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil {
+ _var_287 := *exprVal
+ _ = _var_287
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources")
+ }
+ if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil {
+ _outpostId := *exprVal
+ _ = _outpostId
+ if rulesfn.IsValidHostLabel(_outpostId, false) {
+ if exprVal := params.UseArnRegion; exprVal != nil {
+ _UseArnRegion := *exprVal
+ _ = _UseArnRegion
+ if _UseArnRegion == false {
+ if !(_bucketArn.Region == _Region) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid configuration: region from ARN `")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString("` does not match client region `")
+ out.WriteString(_Region)
+ out.WriteString("` and UseArnRegion is `false`")
+ return out.String()
+ }())
+ }
+ }
+ }
+ if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil {
+ _bucketPartition := *exprVal
+ _ = _bucketPartition
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if _bucketPartition.Name == _partitionResult.Name {
+ if rulesfn.IsValidHostLabel(_bucketArn.Region, true) {
+ if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) {
+ if exprVal := _bucketArn.ResourceId.Get(2); exprVal != nil {
+ _outpostType := *exprVal
+ _ = _outpostType
+ if exprVal := _bucketArn.ResourceId.Get(3); exprVal != nil {
+ _accessPointName := *exprVal
+ _ = _accessPointName
+ if _outpostType == "accesspoint" {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".")
+ out.WriteString(_outpostId)
+ out.WriteString(".")
+ out.WriteString(_url.Authority)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_accessPointName)
+ out.WriteString("-")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString(".")
+ out.WriteString(_outpostId)
+ out.WriteString(".s3-outposts.")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString(".")
+ out.WriteString(_bucketPartition.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4a",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
+ return sp
+ }(),
+ },
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Expected an outpost type `accesspoint`, found ")
+ out.WriteString(_outpostType)
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: expected an access point name")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a 4-component resource")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `")
+ out.WriteString(_bucketArn.AccountId)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid region in ARN: `")
+ out.WriteString(_bucketArn.Region)
+ out.WriteString("` (invalid DNS name)")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Client was configured for partition `")
+ out.WriteString(_partitionResult.Name)
+ out.WriteString("` but ARN (`")
+ out.WriteString(_Bucket)
+ out.WriteString("`) has `")
+ out.WriteString(_bucketPartition.Name)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. Found: `")
+ out.WriteString(_outpostId)
+ out.WriteString("`")
+ return out.String()
+ }())
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The Outpost Id was not set")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: Unrecognized format: ")
+ out.WriteString(_Bucket)
+ out.WriteString(" (type: ")
+ out.WriteString(_arnType)
+ out.WriteString(")")
+ return out.String()
+ }())
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: No ARN type specified")
+ }
+ }
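+	// NOTE (editorial): a bucket that merely starts with "arn:" but fails ARN
+	// parsing gets a dedicated error here instead of falling through to
+	// path-style resolution with the malformed ARN encoded into the path.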
+ if exprVal := rulesfn.SubString(_Bucket, 0, 4, false); exprVal != nil {
+ _arnPrefix := *exprVal
+ _ = _arnPrefix
+ if _arnPrefix == "arn:" {
+ if !(awsrulesfn.ParseARN(_Bucket) != nil) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", func() string {
+ var out strings.Builder
+ out.WriteString("Invalid ARN: `")
+ out.WriteString(_Bucket)
+ out.WriteString("` was not a valid ARN")
+ return out.String()
+ }())
+ }
+ }
+ }
+ if _ForcePathStyle == true {
+ if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil {
+ _var_300 := *exprVal
+ _ = _var_300
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets")
+ }
+ }
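+	// NOTE (editorial): everything below is path-style addressing. The bucket is
+	// URI-encoded into the request path, and the hostname carries only the s3
+	// service label, optionally qualified with "-fips" and/or ".dualstack".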
+ _uri_encoded_bucket := rulesfn.URIEncode(_Bucket)
+ _ = _uri_encoded_bucket
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if _Accelerate == false {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == true {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == false {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _UseFIPS == false {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _UseFIPS == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _UseFIPS == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.NormalizedPath)
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == false {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _UseFIPS == false {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ out.WriteString("/")
+ out.WriteString(_uri_encoded_bucket)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with S3 Accelerate")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ if exprVal := params.UseObjectLambdaEndpoint; exprVal != nil {
+ _UseObjectLambdaEndpoint := *exprVal
+ _ = _UseObjectLambdaEndpoint
+ if _UseObjectLambdaEndpoint == true {
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if rulesfn.IsValidHostLabel(_Region, true) {
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack")
+ }
+ if _Accelerate == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate")
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ if _UseFIPS == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-object-lambda-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-object-lambda.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ }
+ if !(params.Bucket != nil) {
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _partitionResult := *exprVal
+ _ = _partitionResult
+ if rulesfn.IsValidHostLabel(_Region, true) {
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.dualstack.us-east-1.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == true {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.dualstack.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+ _url := *exprVal
+ _ = _url
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString(_url.Scheme)
+ out.WriteString("://")
+ out.WriteString(_url.Authority)
+ out.WriteString(_url.Path)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if _Region == "aws-global" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == true {
+ if _Region == "us-east-1" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if !(params.Endpoint != nil) {
+ if !(_Region == "aws-global") {
+ if _UseGlobalEndpoint == false {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://s3.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_partitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+ smithyhttp.SetSigV4SigningName(&sp, "s3")
+ smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "A region must be set when sending requests to S3.")
+}
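+
+// A hedged usage sketch (illustrative, not generated code; assumes the
+// NewDefaultEndpointResolverV2 constructor defined earlier in this file): the
+// rule tree above can be exercised directly. With only a region set and no
+// custom endpoint, FIPS, dual-stack, or accelerate option, the region rules
+// produce the standard base endpoint.
+//
+//	params := EndpointParameters{
+//	    Region:       aws.String("us-west-2"),
+//	    UseFIPS:      aws.Bool(false),
+//	    UseDualStack: aws.Bool(false),
+//	    Accelerate:   aws.Bool(false),
+//	}
+//	ep, err := NewDefaultEndpointResolverV2().ResolveEndpoint(context.Background(), params)
+//	// err == nil; ep.URI.String() == "https://s3.us-west-2.amazonaws.com"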
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+ params.ForcePathStyle = aws.Bool(options.UsePathStyle)
+ params.Accelerate = aws.Bool(options.UseAccelerate)
+ params.DisableMultiRegionAccessPoints = aws.Bool(options.DisableMultiRegionAccessPoints)
+ params.UseArnRegion = aws.Bool(options.UseARNRegion)
+
+ params.DisableS3ExpressSessionAuth = options.DisableS3ExpressSessionAuth
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ ctx = setS3ResolvedURI(ctx, endpt.URI.String())
+
+ backend := s3cust.GetPropertiesBackend(&endpt.Properties)
+ ctx = internalcontext.SetS3Backend(ctx, backend)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go
new file mode 100644
index 000000000..d6cdb5337
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/eventstream.go
@@ -0,0 +1,285 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithysync "github.com/aws/smithy-go/sync"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "io/ioutil"
+ "sync"
+)
+
+// SelectObjectContentEventStreamReader provides the interface for reading events
+// from a stream.
+//
+// The reader's Close method must allow multiple concurrent calls.
+type SelectObjectContentEventStreamReader interface {
+ Events() <-chan types.SelectObjectContentEventStream
+ Close() error
+ Err() error
+}
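+
+// A consumption sketch (illustrative; event variant names such as
+// types.SelectObjectContentEventStreamMemberRecords are assumed from the
+// generated types package of this module):
+//
+//	for event := range reader.Events() {
+//	    switch e := event.(type) {
+//	    case *types.SelectObjectContentEventStreamMemberRecords:
+//	        _, _ = os.Stdout.Write(e.Value.Payload)
+//	    case *types.SelectObjectContentEventStreamMemberEnd:
+//	        // stream completed successfully
+//	    }
+//	}
+//	if err := reader.Err(); err != nil {
+//	    log.Fatal(err)
+//	}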
+
+type selectObjectContentEventStreamReader struct {
+ stream chan types.SelectObjectContentEventStream
+ decoder *eventstream.Decoder
+ eventStream io.ReadCloser
+ err *smithysync.OnceErr
+ payloadBuf []byte
+ done chan struct{}
+ closeOnce sync.Once
+}
+
+func newSelectObjectContentEventStreamReader(readCloser io.ReadCloser, decoder *eventstream.Decoder) *selectObjectContentEventStreamReader {
+ w := &selectObjectContentEventStreamReader{
+ stream: make(chan types.SelectObjectContentEventStream),
+ decoder: decoder,
+ eventStream: readCloser,
+ err: smithysync.NewOnceErr(),
+ done: make(chan struct{}),
+ payloadBuf: make([]byte, 10*1024),
+ }
+
+ go w.readEventStream()
+
+ return w
+}
+
+func (r *selectObjectContentEventStreamReader) Events() <-chan types.SelectObjectContentEventStream {
+ return r.stream
+}
+
+func (r *selectObjectContentEventStreamReader) readEventStream() {
+ defer r.Close()
+ defer close(r.stream)
+
+ for {
+ r.payloadBuf = r.payloadBuf[0:0]
+ decodedMessage, err := r.decoder.Decode(r.eventStream, r.payloadBuf)
+ if err != nil {
+ if err == io.EOF {
+ return
+ }
+ select {
+ case <-r.done:
+ return
+ default:
+ r.err.SetError(err)
+ return
+ }
+ }
+
+ event, err := r.deserializeEventMessage(&decodedMessage)
+ if err != nil {
+ r.err.SetError(err)
+ return
+ }
+
+ select {
+ case r.stream <- event:
+ case <-r.done:
+ return
+ }
+
+ }
+}
+
+func (r *selectObjectContentEventStreamReader) deserializeEventMessage(msg *eventstream.Message) (types.SelectObjectContentEventStream, error) {
+ messageType := msg.Headers.Get(eventstreamapi.MessageTypeHeader)
+ if messageType == nil {
+ return nil, fmt.Errorf("%s event header not present", eventstreamapi.MessageTypeHeader)
+ }
+
+ switch messageType.String() {
+ case eventstreamapi.EventMessageType:
+ var v types.SelectObjectContentEventStream
+ if err := awsRestxml_deserializeEventStreamSelectObjectContentEventStream(&v, msg); err != nil {
+ return nil, err
+ }
+ return v, nil
+
+ case eventstreamapi.ExceptionMessageType:
+ return nil, awsRestxml_deserializeEventStreamExceptionSelectObjectContentEventStream(msg)
+
+ case eventstreamapi.ErrorMessageType:
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+ if header := msg.Headers.Get(eventstreamapi.ErrorCodeHeader); header != nil {
+ errorCode = header.String()
+ }
+ if header := msg.Headers.Get(eventstreamapi.ErrorMessageHeader); header != nil {
+ errorMessage = header.String()
+ }
+ return nil, &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+
+ default:
+ mc := msg.Clone()
+ return nil, &UnknownEventMessageError{
+ Type: messageType.String(),
+ Message: &mc,
+ }
+
+ }
+}
+
+func (r *selectObjectContentEventStreamReader) ErrorSet() <-chan struct{} {
+ return r.err.ErrorSet()
+}
+
+func (r *selectObjectContentEventStreamReader) Close() error {
+ r.closeOnce.Do(r.safeClose)
+ return r.Err()
+}
+
+func (r *selectObjectContentEventStreamReader) safeClose() {
+ close(r.done)
+ r.eventStream.Close()
+}
+
+func (r *selectObjectContentEventStreamReader) Err() error {
+ return r.err.Err()
+}
+
+func (r *selectObjectContentEventStreamReader) Closed() <-chan struct{} {
+ return r.done
+}
+
+type awsRestxml_deserializeOpEventStreamSelectObjectContent struct {
+ LogEventStreamWrites bool
+ LogEventStreamReads bool
+}
+
+func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) ID() string {
+ return "OperationEventStreamDeserializer"
+}
+
+func (m *awsRestxml_deserializeOpEventStreamSelectObjectContent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ defer func() {
+ if err == nil {
+ return
+ }
+ m.closeResponseBody(out)
+ }()
+
+ logger := middleware.GetLogger(ctx)
+
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
+ }
+ _ = request
+
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ deserializeOutput, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse)
+ }
+ _ = deserializeOutput
+
+ output, ok := out.Result.(*SelectObjectContentOutput)
+ if out.Result != nil && !ok {
+ return out, metadata, fmt.Errorf("unexpected output result type: %T", out.Result)
+ } else if out.Result == nil {
+ output = &SelectObjectContentOutput{}
+ out.Result = output
+ }
+
+ eventReader := newSelectObjectContentEventStreamReader(
+ deserializeOutput.Body,
+ eventstream.NewDecoder(func(options *eventstream.DecoderOptions) {
+ options.Logger = logger
+ options.LogMessages = m.LogEventStreamReads
+
+ }),
+ )
+ defer func() {
+ if err == nil {
+ return
+ }
+ _ = eventReader.Close()
+ }()
+
+ output.eventStream = NewSelectObjectContentEventStream(func(stream *SelectObjectContentEventStream) {
+ stream.Reader = eventReader
+ })
+
+ go output.eventStream.waitStreamClose()
+
+ return out, metadata, nil
+}
+
+func (*awsRestxml_deserializeOpEventStreamSelectObjectContent) closeResponseBody(out middleware.DeserializeOutput) {
+ if resp, ok := out.RawResponse.(*smithyhttp.Response); ok && resp != nil && resp.Body != nil {
+ _, _ = io.Copy(ioutil.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func addEventStreamSelectObjectContentMiddleware(stack *middleware.Stack, options Options) error {
+ if err := stack.Deserialize.Insert(&awsRestxml_deserializeOpEventStreamSelectObjectContent{
+ LogEventStreamWrites: options.ClientLogMode.IsRequestEventMessage(),
+ LogEventStreamReads: options.ClientLogMode.IsResponseEventMessage(),
+ }, "OperationDeserializer", middleware.Before); err != nil {
+ return err
+ }
+ return nil
+
+}
+
+// UnknownEventMessageError provides an error when a message is received from the stream,
+// but the reader is unable to determine what kind of message it is.
+type UnknownEventMessageError struct {
+ Type string
+ Message *eventstream.Message
+}
+
+// Error returns the error message string.
+func (e *UnknownEventMessageError) Error() string {
+ return "unknown event stream message type, " + e.Type
+}
+
+func setSafeEventStreamClientLogMode(o *Options, operation string) {
+ switch operation {
+ case "SelectObjectContent":
+ toggleEventStreamClientLogMode(o, false, true)
+ return
+
+ default:
+ return
+
+ }
+}
+func toggleEventStreamClientLogMode(o *Options, request, response bool) {
+ mode := o.ClientLogMode
+
+ if request && mode.IsRequestWithBody() {
+ mode.ClearRequestWithBody()
+ mode |= aws.LogRequest
+ }
+
+ if response && mode.IsResponseWithBody() {
+ mode.ClearResponseWithBody()
+ mode |= aws.LogResponse
+ }
+
+ o.ClientLogMode = mode
+
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go
new file mode 100644
index 000000000..bbac9ca27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go
@@ -0,0 +1,9 @@
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+)
+
+// ExpressCredentialsProvider retrieves credentials for operations against the
+// S3Express storage class.
+type ExpressCredentialsProvider = customizations.S3ExpressCredentialsProvider
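+
+// A wiring sketch (hypothetical caller code; myProvider is any implementation
+// of ExpressCredentialsProvider): the default session-credentials cache can be
+// replaced when the client is constructed.
+//
+//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
+//	    o.ExpressCredentials = myProvider
+//	})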
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go
new file mode 100644
index 000000000..3b35a3e57
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go
@@ -0,0 +1,170 @@
+package s3
+
+import (
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+ "github.com/aws/smithy-go/container/private/cache"
+ "github.com/aws/smithy-go/container/private/cache/lru"
+)
+
+const s3ExpressCacheCap = 100
+
+const s3ExpressRefreshWindow = 1 * time.Minute
+
+type cacheKey struct {
+ CredentialsHash string // hmac(sigv4 akid, sigv4 secret)
+ Bucket string
+}
+
+func (c cacheKey) Slug() string {
+ return fmt.Sprintf("%s%s", c.CredentialsHash, c.Bucket)
+}
+
+type sessionCredsCache struct {
+ mu sync.Mutex
+ cache cache.Cache
+}
+
+func (c *sessionCredsCache) Get(key cacheKey) (*aws.Credentials, bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if v, ok := c.cache.Get(key); ok {
+ return v.(*aws.Credentials), true
+ }
+ return nil, false
+}
+
+func (c *sessionCredsCache) Put(key cacheKey, creds *aws.Credentials) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.cache.Put(key, creds)
+}
+
+// The default S3Express provider uses an LRU cache with a capacity of 100.
+//
+// Credentials will be refreshed asynchronously when a Retrieve() call is made
+// for cached credentials within an expiry window (1 minute, currently
+// non-configurable).
+type defaultS3ExpressCredentialsProvider struct {
+ sf singleflight.Group
+
+ client createSessionAPIClient
+ cache *sessionCredsCache
+ refreshWindow time.Duration
+ v4creds aws.CredentialsProvider // underlying credentials used for CreateSession
+}
+
+type createSessionAPIClient interface {
+ CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error)
+}
+
+func newDefaultS3ExpressCredentialsProvider() *defaultS3ExpressCredentialsProvider {
+ return &defaultS3ExpressCredentialsProvider{
+ cache: &sessionCredsCache{
+ cache: lru.New(s3ExpressCacheCap),
+ },
+ refreshWindow: s3ExpressRefreshWindow,
+ }
+}
+
+// CloneWithBaseCredentials returns a copy of the provider using new base
+// credentials; it is used when per-operation config mutations change the
+// credentials provider.
+func (p *defaultS3ExpressCredentialsProvider) CloneWithBaseCredentials(v4creds aws.CredentialsProvider) *defaultS3ExpressCredentialsProvider {
+ return &defaultS3ExpressCredentialsProvider{
+ client: p.client,
+ cache: p.cache,
+ refreshWindow: p.refreshWindow,
+ v4creds: v4creds,
+ }
+}
+
+func (p *defaultS3ExpressCredentialsProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) {
+ v4creds, err := p.v4creds.Retrieve(ctx)
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("get sigv4 creds: %w", err)
+ }
+
+ key := cacheKey{
+ CredentialsHash: gethmac(v4creds.AccessKeyID, v4creds.SecretAccessKey),
+ Bucket: bucket,
+ }
+ creds, ok := p.cache.Get(key)
+ if !ok || creds.Expired() {
+ return p.awaitDoChanRetrieve(ctx, key)
+ }
+
+ if creds.Expires.Sub(sdk.NowTime()) <= p.refreshWindow {
+ p.doChanRetrieve(ctx, key)
+ }
+
+ return *creds, nil
+}
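+
+// Retrieve has three outcomes; sketched here for the current 1-minute refresh
+// window:
+//
+//	cache miss or expired creds -> block on CreateSession (deduplicated per
+//	                               key via singleflight)
+//	creds expire within 1m      -> return cached creds, refresh in background
+//	otherwise                   -> return cached creds as-is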
+
+func (p *defaultS3ExpressCredentialsProvider) doChanRetrieve(ctx context.Context, key cacheKey) <-chan singleflight.Result {
+ return p.sf.DoChan(key.Slug(), func() (interface{}, error) {
+ return p.retrieve(ctx, key)
+ })
+}
+
+func (p *defaultS3ExpressCredentialsProvider) awaitDoChanRetrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) {
+ ch := p.doChanRetrieve(ctx, key)
+
+ select {
+ case r := <-ch:
+ return r.Val.(aws.Credentials), r.Err
+ case <-ctx.Done():
+ return aws.Credentials{}, errors.New("s3express retrieve credentials canceled")
+ }
+}
+
+func (p *defaultS3ExpressCredentialsProvider) retrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) {
+ resp, err := p.client.CreateSession(ctx, &CreateSessionInput{
+ Bucket: aws.String(key.Bucket),
+ })
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ creds, err := credentialsFromResponse(resp)
+ if err != nil {
+ return aws.Credentials{}, err
+ }
+
+ p.cache.Put(key, creds)
+ return *creds, nil
+}
+
+func credentialsFromResponse(o *CreateSessionOutput) (*aws.Credentials, error) {
+ if o.Credentials == nil {
+ return nil, errors.New("s3express session credentials unset")
+ }
+
+ if o.Credentials.AccessKeyId == nil || o.Credentials.SecretAccessKey == nil || o.Credentials.SessionToken == nil || o.Credentials.Expiration == nil {
+ return nil, errors.New("s3express session credentials missing one or more required fields")
+ }
+
+ return &aws.Credentials{
+ AccessKeyID: *o.Credentials.AccessKeyId,
+ SecretAccessKey: *o.Credentials.SecretAccessKey,
+ SessionToken: *o.Credentials.SessionToken,
+ CanExpire: true,
+ Expires: *o.Credentials.Expiration,
+ }, nil
+}
+
+func gethmac(p, key string) string {
+ hash := hmac.New(sha256.New, []byte(key))
+ hash.Write([]byte(p))
+ return string(hash.Sum(nil))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go
new file mode 100644
index 000000000..7c7a7b424
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go
@@ -0,0 +1,39 @@
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+)
+
+// If the caller hasn't provided an S3Express provider, we use our default
+// which will grab a reference to the S3 client itself in finalization.
+func resolveExpressCredentials(o *Options) {
+ if o.ExpressCredentials == nil {
+ o.ExpressCredentials = newDefaultS3ExpressCredentialsProvider()
+ }
+}
+
+// Config finalizer: if we're using the default S3Express implementation, grab
+// a reference to the client for its CreateSession API, and the underlying
+// sigv4 credentials provider for cache keying.
+func finalizeExpressCredentials(o *Options, c *Client) {
+ if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok {
+ p.client = c
+ p.v4creds = o.Credentials
+ }
+}
+
+// Operation config finalizer: update the sigv4 credentials on the default
+// express provider in case they changed, so the session cache is keyed by the
+// current credentials.
+func finalizeOperationExpressCredentials(o *Options, c Client) {
+ if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok {
+ o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials)
+ }
+}
+
+// NewFromConfig resolver: pull the setting from opaque config sources if present.
+func resolveDisableExpressAuth(cfg aws.Config, o *Options) {
+ if v, ok := customizations.ResolveDisableExpressAuth(cfg.ConfigSources); ok {
+ o.DisableS3ExpressSessionAuth = &v
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go
new file mode 100644
index 000000000..a9b54535b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go
@@ -0,0 +1,43 @@
+package s3
+
+import (
+ "context"
+ "strings"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// isExpressUserAgent tracks whether the caller is using S3 Express
+//
+// We can only derive this at runtime, so the middleware holds a handle to the
+// underlying user-agent manipulator and sets the feature flag as necessary.
+type isExpressUserAgent struct {
+ ua *awsmiddleware.RequestUserAgent
+}
+
+func (*isExpressUserAgent) ID() string {
+ return "isExpressUserAgent"
+}
+
+func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ const expressSuffix = "--x-s3"
+
+ bucket, ok := bucketFromInput(in.Parameters)
+ if ok && strings.HasSuffix(bucket, expressSuffix) {
+ m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket)
+ }
+ return next.HandleSerialize(ctx, in)
+}
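+
+// For example, a directory-bucket name such as "mybucket--usw2-az1--x-s3"
+// (illustrative name) ends in the expressSuffix, so requests against it are
+// tagged with the S3 Express user-agent feature.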
+
+func addIsExpressUserAgent(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json
new file mode 100644
index 000000000..fa0119bcb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json
@@ -0,0 +1,139 @@
+{
+ "dependencies": {
+ "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/v4a": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5",
+ "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7",
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3",
+ "github.com/aws/smithy-go": "v1.4.0"
+ },
+ "files": [
+ "api_client.go",
+ "api_client_test.go",
+ "api_op_AbortMultipartUpload.go",
+ "api_op_CompleteMultipartUpload.go",
+ "api_op_CopyObject.go",
+ "api_op_CreateBucket.go",
+ "api_op_CreateBucketMetadataTableConfiguration.go",
+ "api_op_CreateMultipartUpload.go",
+ "api_op_CreateSession.go",
+ "api_op_DeleteBucket.go",
+ "api_op_DeleteBucketAnalyticsConfiguration.go",
+ "api_op_DeleteBucketCors.go",
+ "api_op_DeleteBucketEncryption.go",
+ "api_op_DeleteBucketIntelligentTieringConfiguration.go",
+ "api_op_DeleteBucketInventoryConfiguration.go",
+ "api_op_DeleteBucketLifecycle.go",
+ "api_op_DeleteBucketMetadataTableConfiguration.go",
+ "api_op_DeleteBucketMetricsConfiguration.go",
+ "api_op_DeleteBucketOwnershipControls.go",
+ "api_op_DeleteBucketPolicy.go",
+ "api_op_DeleteBucketReplication.go",
+ "api_op_DeleteBucketTagging.go",
+ "api_op_DeleteBucketWebsite.go",
+ "api_op_DeleteObject.go",
+ "api_op_DeleteObjectTagging.go",
+ "api_op_DeleteObjects.go",
+ "api_op_DeletePublicAccessBlock.go",
+ "api_op_GetBucketAccelerateConfiguration.go",
+ "api_op_GetBucketAcl.go",
+ "api_op_GetBucketAnalyticsConfiguration.go",
+ "api_op_GetBucketCors.go",
+ "api_op_GetBucketEncryption.go",
+ "api_op_GetBucketIntelligentTieringConfiguration.go",
+ "api_op_GetBucketInventoryConfiguration.go",
+ "api_op_GetBucketLifecycleConfiguration.go",
+ "api_op_GetBucketLocation.go",
+ "api_op_GetBucketLogging.go",
+ "api_op_GetBucketMetadataTableConfiguration.go",
+ "api_op_GetBucketMetricsConfiguration.go",
+ "api_op_GetBucketNotificationConfiguration.go",
+ "api_op_GetBucketOwnershipControls.go",
+ "api_op_GetBucketPolicy.go",
+ "api_op_GetBucketPolicyStatus.go",
+ "api_op_GetBucketReplication.go",
+ "api_op_GetBucketRequestPayment.go",
+ "api_op_GetBucketTagging.go",
+ "api_op_GetBucketVersioning.go",
+ "api_op_GetBucketWebsite.go",
+ "api_op_GetObject.go",
+ "api_op_GetObjectAcl.go",
+ "api_op_GetObjectAttributes.go",
+ "api_op_GetObjectLegalHold.go",
+ "api_op_GetObjectLockConfiguration.go",
+ "api_op_GetObjectRetention.go",
+ "api_op_GetObjectTagging.go",
+ "api_op_GetObjectTorrent.go",
+ "api_op_GetPublicAccessBlock.go",
+ "api_op_HeadBucket.go",
+ "api_op_HeadObject.go",
+ "api_op_ListBucketAnalyticsConfigurations.go",
+ "api_op_ListBucketIntelligentTieringConfigurations.go",
+ "api_op_ListBucketInventoryConfigurations.go",
+ "api_op_ListBucketMetricsConfigurations.go",
+ "api_op_ListBuckets.go",
+ "api_op_ListDirectoryBuckets.go",
+ "api_op_ListMultipartUploads.go",
+ "api_op_ListObjectVersions.go",
+ "api_op_ListObjects.go",
+ "api_op_ListObjectsV2.go",
+ "api_op_ListParts.go",
+ "api_op_PutBucketAccelerateConfiguration.go",
+ "api_op_PutBucketAcl.go",
+ "api_op_PutBucketAnalyticsConfiguration.go",
+ "api_op_PutBucketCors.go",
+ "api_op_PutBucketEncryption.go",
+ "api_op_PutBucketIntelligentTieringConfiguration.go",
+ "api_op_PutBucketInventoryConfiguration.go",
+ "api_op_PutBucketLifecycleConfiguration.go",
+ "api_op_PutBucketLogging.go",
+ "api_op_PutBucketMetricsConfiguration.go",
+ "api_op_PutBucketNotificationConfiguration.go",
+ "api_op_PutBucketOwnershipControls.go",
+ "api_op_PutBucketPolicy.go",
+ "api_op_PutBucketReplication.go",
+ "api_op_PutBucketRequestPayment.go",
+ "api_op_PutBucketTagging.go",
+ "api_op_PutBucketVersioning.go",
+ "api_op_PutBucketWebsite.go",
+ "api_op_PutObject.go",
+ "api_op_PutObjectAcl.go",
+ "api_op_PutObjectLegalHold.go",
+ "api_op_PutObjectLockConfiguration.go",
+ "api_op_PutObjectRetention.go",
+ "api_op_PutObjectTagging.go",
+ "api_op_PutPublicAccessBlock.go",
+ "api_op_RestoreObject.go",
+ "api_op_SelectObjectContent.go",
+ "api_op_UploadPart.go",
+ "api_op_UploadPartCopy.go",
+ "api_op_WriteGetObjectResponse.go",
+ "auth.go",
+ "deserializers.go",
+ "doc.go",
+ "endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
+ "eventstream.go",
+ "generated.json",
+ "internal/endpoints/endpoints.go",
+ "internal/endpoints/endpoints_test.go",
+ "options.go",
+ "protocol_test.go",
+ "serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
+ "types/enums.go",
+ "types/errors.go",
+ "types/types.go",
+ "types/types_exported_test.go",
+ "validators.go"
+ ],
+ "go": "1.22",
+ "module": "github.com/aws/aws-sdk-go-v2/service/s3",
+ "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
new file mode 100644
index 000000000..5831a91c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package s3
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.78.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go
new file mode 100644
index 000000000..6aae79e7c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go
@@ -0,0 +1,214 @@
+package s3
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// ListObjectVersionsAPIClient is a client that implements the ListObjectVersions
+// operation
+type ListObjectVersionsAPIClient interface {
+ ListObjectVersions(context.Context, *ListObjectVersionsInput, ...func(*Options)) (*ListObjectVersionsOutput, error)
+}
+
+var _ ListObjectVersionsAPIClient = (*Client)(nil)
+
+// ListObjectVersionsPaginatorOptions is the paginator options for ListObjectVersions
+type ListObjectVersionsPaginatorOptions struct {
+ // (Optional) The maximum number of Object Versions that you want Amazon S3 to
+ // return.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListObjectVersionsPaginator is a paginator for ListObjectVersions
+type ListObjectVersionsPaginator struct {
+ options ListObjectVersionsPaginatorOptions
+ client ListObjectVersionsAPIClient
+ params *ListObjectVersionsInput
+ firstPage bool
+ keyMarker *string
+ versionIDMarker *string
+ isTruncated bool
+}
+
+// NewListObjectVersionsPaginator returns a new ListObjectVersionsPaginator
+func NewListObjectVersionsPaginator(client ListObjectVersionsAPIClient, params *ListObjectVersionsInput, optFns ...func(*ListObjectVersionsPaginatorOptions)) *ListObjectVersionsPaginator {
+ if params == nil {
+ params = &ListObjectVersionsInput{}
+ }
+
+ options := ListObjectVersionsPaginatorOptions{}
+ if params.MaxKeys != nil {
+ options.Limit = aws.ToInt32(params.MaxKeys)
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListObjectVersionsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ keyMarker: params.KeyMarker,
+ versionIDMarker: params.VersionIdMarker,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListObjectVersionsPaginator) HasMorePages() bool {
+ return p.firstPage || p.isTruncated
+}
+
+// NextPage retrieves the next ListObjectVersions page.
+func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.KeyMarker = p.keyMarker
+ params.VersionIdMarker = p.versionIDMarker
+
+ var limit int32
+ if p.options.Limit > 0 {
+ limit = p.options.Limit
+ }
+ if limit > 0 {
+ params.MaxKeys = aws.Int32(limit)
+ }
+
+	result, err := p.client.ListObjectVersions(ctx, &params, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.keyMarker
+ p.isTruncated = aws.ToBool(result.IsTruncated)
+ p.keyMarker = nil
+ p.versionIDMarker = nil
+ if aws.ToBool(result.IsTruncated) {
+ p.keyMarker = result.NextKeyMarker
+ p.versionIDMarker = result.NextVersionIdMarker
+ }
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.keyMarker != nil &&
+ *prevToken == *p.keyMarker {
+ p.isTruncated = false
+ }
+
+ return result, nil
+}
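+
+// A typical pagination loop (caller-side sketch; bucket name is illustrative):
+//
+//	p := NewListObjectVersionsPaginator(client, &ListObjectVersionsInput{
+//	    Bucket: aws.String("my-bucket"),
+//	})
+//	for p.HasMorePages() {
+//	    page, err := p.NextPage(ctx)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    _ = page.Versions // process this page of object versions
+//	}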
+
+// ListMultipartUploadsAPIClient is a client that implements the ListMultipartUploads
+// operation
+type ListMultipartUploadsAPIClient interface {
+ ListMultipartUploads(context.Context, *ListMultipartUploadsInput, ...func(*Options)) (*ListMultipartUploadsOutput, error)
+}
+
+var _ ListMultipartUploadsAPIClient = (*Client)(nil)
+
+// ListMultipartUploadsPaginatorOptions is the paginator options for ListMultipartUploads
+type ListMultipartUploadsPaginatorOptions struct {
+ // (Optional) The maximum number of Multipart Uploads that you want Amazon S3 to
+ // return.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads
+type ListMultipartUploadsPaginator struct {
+ options ListMultipartUploadsPaginatorOptions
+ client ListMultipartUploadsAPIClient
+ params *ListMultipartUploadsInput
+ firstPage bool
+ keyMarker *string
+ uploadIDMarker *string
+ isTruncated bool
+}
+
+// NewListMultipartUploadsPaginator returns a new ListMultipartUploadsPaginator
+func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput, optFns ...func(*ListMultipartUploadsPaginatorOptions)) *ListMultipartUploadsPaginator {
+ if params == nil {
+ params = &ListMultipartUploadsInput{}
+ }
+
+ options := ListMultipartUploadsPaginatorOptions{}
+ if params.MaxUploads != nil {
+ options.Limit = aws.ToInt32(params.MaxUploads)
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListMultipartUploadsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ keyMarker: params.KeyMarker,
+ uploadIDMarker: params.UploadIdMarker,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListMultipartUploadsPaginator) HasMorePages() bool {
+ return p.firstPage || p.isTruncated
+}
+
+// NextPage retrieves the next ListMultipartUploads page.
+func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.KeyMarker = p.keyMarker
+ params.UploadIdMarker = p.uploadIDMarker
+
+ var limit int32
+ if p.options.Limit > 0 {
+ limit = p.options.Limit
+ }
+ if limit > 0 {
+ params.MaxUploads = aws.Int32(limit)
+ }
+
+	result, err := p.client.ListMultipartUploads(ctx, &params, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.keyMarker
+ p.isTruncated = aws.ToBool(result.IsTruncated)
+ p.keyMarker = nil
+ p.uploadIDMarker = nil
+ if aws.ToBool(result.IsTruncated) {
+ p.keyMarker = result.NextKeyMarker
+ p.uploadIDMarker = result.NextUploadIdMarker
+ }
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.keyMarker != nil &&
+ *prevToken == *p.keyMarker {
+ p.isTruncated = false
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go
new file mode 100644
index 000000000..97b5771bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/arn/arn_parser.go
@@ -0,0 +1,106 @@
+package arn
+
+import (
+ "fmt"
+ "strings"
+
+ awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+)
+
+const (
+ s3Namespace = "s3"
+ s3ObjectsLambdaNamespace = "s3-object-lambda"
+ s3OutpostsNamespace = "s3-outposts"
+)
+
+// ParseEndpointARN parses a generic AWS ARN into an S3 ARN resource.
+func ParseEndpointARN(v awsarn.ARN) (arn.Resource, error) {
+ return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+ resParts := arn.SplitResource(a.Resource)
+
+ switch resParts[0] {
+ case "accesspoint":
+ switch a.Service {
+ case s3Namespace:
+ return arn.ParseAccessPointResource(a, resParts[1:])
+ case s3ObjectsLambdaNamespace:
+ return parseS3ObjectLambdaAccessPointResource(a, resParts)
+ default:
+ return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
+ }
+ case "outpost":
+ if a.Service != s3OutpostsNamespace {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not %s"}
+ }
+ return parseOutpostAccessPointResource(a, resParts[1:])
+ default:
+ return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+ }
+}
+
+func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
+ // outpost accesspoint arn is only valid if service is s3-outposts
+ if a.Service != "s3-outposts" {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+ }
+
+ if len(resParts) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ if len(resParts) < 3 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
+ ARN: a, Reason: "access-point resource not set in Outpost ARN",
+ }
+ }
+
+ resID := strings.TrimSpace(resParts[0])
+ if len(resID) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
+ }
+
+ var outpostAccessPointARN = arn.OutpostAccessPointARN{}
+ switch resParts[1] {
+ case "accesspoint":
+ // Do not allow region-less outpost access-point arns.
+ if len(a.Region) == 0 {
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "region is not set"}
+ }
+
+ accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
+ if err != nil {
+ return arn.OutpostAccessPointARN{}, err
+ }
+ // set access-point arn
+ outpostAccessPointARN.AccessPointARN = accessPointARN
+ default:
+ return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
+ }
+
+ // set outpost id
+ outpostAccessPointARN.OutpostID = resID
+ return outpostAccessPointARN, nil
+}
+
+func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) {
+ if a.Service != s3ObjectsLambdaNamespace {
+ return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)}
+ }
+
+ if len(a.Region) == 0 {
+ return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)}
+ }
+
+ accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:])
+ if err != nil {
+ return arn.S3ObjectLambdaAccessPointARN{}, err
+ }
+
+ return arn.S3ObjectLambdaAccessPointARN{
+ AccessPointARN: accessPointARN,
+ }, nil
+}
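
For orientation, a sketch of what this parser sees: the generic `aws/arn` package splits an ARN string into its fields, and `ParseEndpointARN` (internal to the SDK, so shown here only via its inputs) then classifies the resource. The ARN value is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	awsarn "github.com/aws/aws-sdk-go-v2/aws/arn"
)

func main() {
	// A hypothetical S3 access point ARN, as a caller would pass in the
	// Bucket parameter of an S3 operation.
	a, err := awsarn.Parse("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap")
	if err != nil {
		log.Fatal(err)
	}
	// Inside the SDK, ParseEndpointARN(a) would classify this as an
	// access point resource based on these fields.
	fmt.Println(a.Service)  // s3
	fmt.Println(a.Resource) // accesspoint/my-ap
}
```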
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go
new file mode 100644
index 000000000..91b8fde0d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go
@@ -0,0 +1,21 @@
+package customizations
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type bucketKey struct{}
+
+// SetBucket stores a bucket name within the request context, which is required
+// for a variety of custom S3 behaviors.
+func SetBucket(ctx context.Context, bucket string) context.Context {
+ return middleware.WithStackValue(ctx, bucketKey{}, bucket)
+}
+
+// GetBucket retrieves a stored bucket name within a context.
+func GetBucket(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, bucketKey{}).(string)
+ return v
+}
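
These accessors are the stack-scoped variant of Go's unexported-context-key pattern. A standalone stdlib sketch of the same idea, differing only in that the SDK scopes values to the middleware stack:

```go
package main

import (
	"context"
	"fmt"
)

type bucketKey struct{}

// setBucket stores a bucket name under an unexported key type, so no other
// package can collide with or read the value by accident.
func setBucket(ctx context.Context, bucket string) context.Context {
	return context.WithValue(ctx, bucketKey{}, bucket)
}

func getBucket(ctx context.Context) string {
	v, _ := ctx.Value(bucketKey{}).(string)
	return v
}

func main() {
	ctx := setBucket(context.Background(), "example-bucket")
	fmt.Println(getBucket(ctx)) // example-bucket
}
```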
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go
new file mode 100644
index 000000000..e1d1cbefa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/doc.go
@@ -0,0 +1,79 @@
+/*
+Package customizations provides customizations for the Amazon S3 API client.
+
+This package provides support for the following S3 customizations:
+
+ ProcessARN Middleware: processes an ARN if provided as input and updates the endpoint as per the arn type
+
+ UpdateEndpoint Middleware: resolves a custom endpoint as per s3 config options
+
+ RemoveBucket Middleware: removes a serialized bucket name from request url path
+
+ processResponseWith200Error Middleware: Deserializing response error with 200 status code
+
+# Virtual Host style url addressing
+
+Since serializers serialize to path-style URLs by default, a customization
+modifies the endpoint URL when the `UsePathStyle` option on the S3 client is
+unset or false. This flag is ignored if the `UseAccelerate` option is set to true.
+
+If UseAccelerate is not enabled and the bucket name is not a valid hostname
+label, the SDK will fall back to forcing the request to be made as if
+UsePathStyle was enabled. This behavior is also used if UseDualStackEndpoint is enabled.
+
+https://docs.aws.amazon.com/AmazonS3/latest/dev/dual-stack-endpoints.html#dual-stack-endpoints-description
+
+# Transfer acceleration
+
+S3 Transfer Acceleration support is disabled by default; enable it with the
+`UseAccelerate` option on the S3 client. Transfer Acceleration only works with
+virtual-host-style addressing, so the `UsePathStyle` option, if set, is
+ignored. Transfer Acceleration is not supported for the S3 operations
+DeleteBucket, ListBuckets, and CreateBucket.
+
+# Dualstack support
+
+Dual-stack support for the s3 client is disabled by default; enable dual-stack
+endpoint support with the `UseDualstack` client option.
+
+# Endpoint customizations
+
+Customizations that look up and process an ARN need to happen before request
+serialization. The UpdateEndpoint middleware, which mutates the resolved
+endpoint based on Options such as UseDualstack and UseAccelerate, executes
+after request serialization. The remove-bucket middleware also executes after
+a request is serialized, and removes the serialized bucket name from the request path.
+
+ Middleware layering:
+
+ Initialize : HTTP Request -> ARN Lookup -> Input-Validation -> Serialize step
+
+ Serialize : HTTP Request -> Process ARN -> operation serializer -> Update-Endpoint customization -> Remove-Bucket -> next middleware
+
+Customization options:
+
+ UseARNRegion (Disabled by Default)
+
+ UsePathStyle (Disabled by Default)
+
+ UseAccelerate (Disabled by Default)
+
+ UseDualstack (Disabled by Default)
+
+# Handle Error response with 200 status code
+
+The S3 operations CopyObject, CompleteMultipartUpload, and UploadPartCopy can
+return an error response with a 2xx status code. The processResponseWith200Error
+middleware customization enables the SDK to check for an error within the
+response body prior to deserialization.
+
+The check for a 2xx response containing an error must be performed before
+response deserialization. Since the deserialize step runs in reverse order
+relative to the other stack steps, it is easier to consider that "after" means
+"before".
+
+ Middleware layering:
+
+ HTTP Response -> handle 200 error customization -> deserialize
+*/
+package customizations
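
The options this package reacts to are plain client options. A hedged configuration sketch, assuming default credential loading:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// Each flag below maps onto one of the customizations described above.
	_ = s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true // path-style URLs; ignored if UseAccelerate is set
		o.UseAccelerate = false
		o.UseARNRegion = true // honor the region embedded in a bucket ARN
	})
}
```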
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go
new file mode 100644
index 000000000..8cc0b3624
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go
@@ -0,0 +1,44 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// S3ExpressCredentialsProvider retrieves credentials for the S3Express storage
+// class.
+type S3ExpressCredentialsProvider interface {
+ Retrieve(ctx context.Context, bucket string) (aws.Credentials, error)
+}
+
+// ExpressIdentityResolver retrieves identity for the S3Express storage class.
+type ExpressIdentityResolver struct {
+ Provider S3ExpressCredentialsProvider
+}
+
+var _ (auth.IdentityResolver) = (*ExpressIdentityResolver)(nil)
+
+// GetIdentity retrieves AWS credentials using the underlying provider.
+func (v *ExpressIdentityResolver) GetIdentity(ctx context.Context, props smithy.Properties) (
+ auth.Identity, error,
+) {
+ bucket, ok := GetIdentityPropertiesBucket(&props)
+ if !ok {
+ bucket = GetBucket(ctx)
+ }
+ if bucket == "" {
+ return nil, fmt.Errorf("bucket name is missing")
+ }
+
+ creds, err := v.Provider.Retrieve(ctx, bucket)
+ if err != nil {
+ return nil, fmt.Errorf("get credentials: %v", err)
+ }
+
+ return &internalauthsmithy.CredentialsAdapter{Credentials: creds}, nil
+}
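
Any type with the single `Retrieve(ctx, bucket)` method can back the identity resolver. A minimal static sketch; a real provider would exchange base credentials for a bucket-scoped S3Express session (for example via CreateSession) and cache the short-lived result:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// staticExpressProvider satisfies the S3ExpressCredentialsProvider shape:
// Retrieve(ctx, bucket) (aws.Credentials, error).
type staticExpressProvider struct {
	creds aws.Credentials
}

func (p *staticExpressProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) {
	// A real implementation would mint bucket-scoped session credentials here.
	return p.creds, nil
}

func main() {
	p := &staticExpressProvider{creds: aws.Credentials{AccessKeyID: "EXAMPLE"}}
	c, _ := p.Retrieve(context.Background(), "my--use1-az4--x-s3") // hypothetical directory bucket
	fmt.Println(c.AccessKeyID)
}
```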
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go
new file mode 100644
index 000000000..bb22d3474
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go
@@ -0,0 +1,18 @@
+package customizations
+
+type s3DisableExpressAuthProvider interface {
+ GetS3DisableExpressAuth() (bool, bool)
+}
+
+// ResolveDisableExpressAuth pulls S3DisableExpressAuth setting from config
+// sources.
+func ResolveDisableExpressAuth(configs []interface{}) (value bool, exists bool) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(s3DisableExpressAuthProvider); ok {
+ if value, exists = p.GetS3DisableExpressAuth(); exists {
+ break
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go
new file mode 100644
index 000000000..cf3ff5966
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go
@@ -0,0 +1,42 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+
+ ictx "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type expressDefaultChecksumMiddleware struct{}
+
+func (*expressDefaultChecksumMiddleware) ID() string {
+ return "expressDefaultChecksum"
+}
+
+func (*expressDefaultChecksumMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if ictx.GetS3Backend(ctx) == ictx.S3BackendS3Express && ictx.GetChecksumInputAlgorithm(ctx) == "" {
+ ctx = ictx.SetChecksumInputAlgorithm(ctx, string(checksum.AlgorithmCRC32))
+ }
+ return next.HandleFinalize(ctx, in)
+}
+
+// AddExpressDefaultChecksumMiddleware appends a step to default to CRC32 for
+// S3Express requests. This should only be applied to operations where a
+// checksum is required (e.g. DeleteObject).
+func AddExpressDefaultChecksumMiddleware(s *middleware.Stack) error {
+ err := s.Finalize.Insert(
+ &expressDefaultChecksumMiddleware{},
+ "AWSChecksum:ComputeInputPayloadChecksum",
+ middleware.Before,
+ )
+ if err != nil {
+ return fmt.Errorf("add expressDefaultChecksum: %v", err)
+ }
+ return nil
+}
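
The `Finalize.Insert` call above positions the middleware relative to a named step. Application code reaches the same stack through `APIOptions`; a hedged sketch attaching a trivial finalize middleware (names hypothetical):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

type logFinalize struct{}

func (logFinalize) ID() string { return "logFinalize" }

func (logFinalize) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
	middleware.FinalizeOutput, middleware.Metadata, error,
) {
	log.Println("request is about to be signed and sent")
	return next.HandleFinalize(ctx, in)
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	_ = s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			// Add at the tail of the finalize step; Insert with a step
			// ID works the same way when relative placement matters.
			return stack.Finalize.Add(logFinalize{}, middleware.After)
		})
	})
}
```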
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go
new file mode 100644
index 000000000..171de4613
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go
@@ -0,0 +1,21 @@
+package customizations
+
+import "github.com/aws/smithy-go"
+
+// GetPropertiesBackend returns a resolved endpoint backend from the property
+// set.
+func GetPropertiesBackend(p *smithy.Properties) string {
+ v, _ := p.Get("backend").(string)
+ return v
+}
+
+// GetIdentityPropertiesBucket returns the S3 bucket from identity properties.
+func GetIdentityPropertiesBucket(ip *smithy.Properties) (string, bool) {
+ v, ok := ip.Get(bucketKey{}).(string)
+ return v, ok
+}
+
+// SetIdentityPropertiesBucket sets the S3 bucket to identity properties.
+func SetIdentityPropertiesBucket(ip *smithy.Properties, bucket string) {
+ ip.Set(bucketKey{}, bucket)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go
new file mode 100644
index 000000000..545e5b220
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go
@@ -0,0 +1,109 @@
+package customizations
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+)
+
+const (
+ s3ExpressSignerVersion = "com.amazonaws.s3#sigv4express"
+ headerAmzSessionToken = "x-amz-s3session-token"
+)
+
+// adapts a v4 signer for S3Express
+type s3ExpressSignerAdapter struct {
+ v4 v4.HTTPSigner
+}
+
+// SignHTTP performs S3Express signing on a request, which is identical to
+// SigV4 signing save for an additional header containing the S3Express
+// session token.
+func (s *s3ExpressSignerAdapter) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error {
+ r.Header.Set(headerAmzSessionToken, credentials.SessionToken)
+ optFns = append(optFns, func(o *v4.SignerOptions) {
+ o.DisableSessionToken = true
+ })
+ return s.v4.SignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...)
+}
+
+// adapts S3ExpressCredentialsProvider to the standard AWS
+// CredentialsProvider interface
+type s3ExpressCredentialsAdapter struct {
+ provider S3ExpressCredentialsProvider
+ bucket string
+}
+
+func (c *s3ExpressCredentialsAdapter) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ return c.provider.Retrieve(ctx, c.bucket)
+}
+
+// S3ExpressSignHTTPRequestMiddleware signs S3Express requests.
+//
+// This is NOT mutually exclusive with existing v4 or v4a signer handling on
+// the stack itself, but only one handler will actually perform signing based
+// on the provided signing version in the context.
+type S3ExpressSignHTTPRequestMiddleware struct {
+ Credentials S3ExpressCredentialsProvider
+ Signer v4.HTTPSigner
+ LogSigning bool
+}
+
+// ID identifies S3ExpressSignHTTPRequestMiddleware.
+func (*S3ExpressSignHTTPRequestMiddleware) ID() string {
+ return "S3ExpressSigning"
+}
+
+// HandleFinalize will sign the request if the S3Express signer has been
+// selected.
+func (m *S3ExpressSignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ if GetSignerVersion(ctx) != s3ExpressSignerVersion {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: m.credentialsAdapter(ctx),
+ Signer: m.signerAdapter(),
+ LogSigning: m.LogSigning,
+ })
+ return mw.HandleFinalize(ctx, in, next)
+}
+
+func (m *S3ExpressSignHTTPRequestMiddleware) credentialsAdapter(ctx context.Context) aws.CredentialsProvider {
+ return &s3ExpressCredentialsAdapter{
+ provider: m.Credentials,
+ bucket: GetBucket(ctx),
+ }
+}
+
+func (m *S3ExpressSignHTTPRequestMiddleware) signerAdapter() v4.HTTPSigner {
+ return &s3ExpressSignerAdapter{v4: m.Signer}
+}
+
+type s3ExpressPresignerAdapter struct {
+ v4 v4.HTTPPresigner
+}
+
+// PresignHTTP presigns a request using S3Express signing, which is identical
+// to SigV4 presigning save for an additional header containing the S3Express
+// session token.
+func (s *s3ExpressPresignerAdapter) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) (
+ string, http.Header, error,
+) {
+ r.Header.Set(headerAmzSessionToken, credentials.SessionToken)
+ optFns = append(optFns, func(o *v4.SignerOptions) {
+ o.DisableSessionToken = true
+ })
+ return s.v4.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...)
+}
+
+var (
+ _ aws.CredentialsProvider = &s3ExpressCredentialsAdapter{}
+ _ v4.HTTPSigner = &s3ExpressSignerAdapter{}
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go
new file mode 100644
index 000000000..e3ec7f011
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go
@@ -0,0 +1,61 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ExpressSigner signs requests for the sigv4-s3express auth scheme.
+//
+// This signer respects the aws.auth#sigv4 properties for signing name and
+// region.
+type ExpressSigner struct {
+ Signer v4.HTTPSigner
+ Logger logging.Logger
+ LogSigning bool
+}
+
+var _ (smithyhttp.Signer) = (*ExpressSigner)(nil)
+
+// SignRequest signs the request with the provided identity.
+func (v *ExpressSigner) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error {
+ ca, ok := identity.(*internalauthsmithy.CredentialsAdapter)
+ if !ok {
+ return fmt.Errorf("unexpected identity type: %T", identity)
+ }
+
+ name, ok := smithyhttp.GetSigV4SigningName(&props)
+ if !ok {
+ return fmt.Errorf("sigv4 signing name is required for s3express variant")
+ }
+
+ region, ok := smithyhttp.GetSigV4SigningRegion(&props)
+ if !ok {
+ return fmt.Errorf("sigv4 signing region is required for s3express variant")
+ }
+
+ hash := v4.GetPayloadHash(ctx)
+
+ r.Header.Set(headerAmzSessionToken, ca.Credentials.SessionToken)
+ err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) {
+ o.DisableSessionToken = true
+
+ o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
+
+ o.Logger = v.Logger
+ o.LogSigning = v.LogSigning
+ })
+ if err != nil {
+ return fmt.Errorf("sign http: %v", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go
new file mode 100644
index 000000000..2b11b1fa2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/handle_200_error.go
@@ -0,0 +1,74 @@
+package customizations
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+
+ "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HandleResponseErrorWith200Status checks for an S3 error response delivered
+// with a 200 status code. If one is found, the response status code is
+// temporarily modified to a 5xx status code.
+func HandleResponseErrorWith200Status(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&processResponseFor200ErrorMiddleware{}, "OperationDeserializer", middleware.After)
+}
+
+// middleware to process raw response and look for error response with 200 status code
+type processResponseFor200ErrorMiddleware struct{}
+
+// ID returns the middleware ID.
+func (*processResponseFor200ErrorMiddleware) ID() string {
+ return "S3:ProcessResponseFor200Error"
+}
+
+func (m *processResponseFor200ErrorMiddleware) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ // check if response status code is 2xx.
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return
+ }
+
+ var readBuff bytes.Buffer
+ body := io.TeeReader(response.Body, &readBuff)
+
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("received empty response payload"),
+ }
+ }
+
+ // rewind response body
+ response.Body = ioutil.NopCloser(io.MultiReader(&readBuff, response.Body))
+
+	// if the start tag is "Error", the response is considered an error response.
+ if strings.EqualFold(t.Name.Local, "Error") {
+ // according to https://aws.amazon.com/premiumsupport/knowledge-center/s3-resolve-200-internalerror/
+ // 200 error responses are similar to 5xx errors.
+ response.StatusCode = 500
+ }
+
+ return out, metadata, err
+}
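
The TeeReader/MultiReader pairing above is a general peek-and-rewind technique: everything the XML decoder consumes is captured and stitched back in front of the unread remainder. A standalone sketch with a stand-in body:

```go
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for an HTTP response body that arrived with a 200 status.
	body := io.NopCloser(strings.NewReader("<Error><Code>InternalError</Code></Error>"))

	// Tee everything the decoder reads into a buffer for later replay.
	var peeked bytes.Buffer
	dec := xml.NewDecoder(io.TeeReader(body, &peeked))
	tok, err := dec.Token()
	if err != nil {
		fmt.Println("empty or unreadable payload:", err)
		return
	}
	if start, ok := tok.(xml.StartElement); ok {
		fmt.Println("root element:", start.Name.Local) // Error
	}

	// Rewind: downstream consumers see the full, untouched payload.
	rewound := io.MultiReader(&peeked, body)
	all, _ := io.ReadAll(rewound)
	fmt.Println(string(all))
}
```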
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go
new file mode 100644
index 000000000..87f7a2232
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/host.go
@@ -0,0 +1,22 @@
+package customizations
+
+import (
+ "github.com/aws/smithy-go/transport/http"
+ "strings"
+)
+
+func updateS3HostForS3AccessPoint(req *http.Request) {
+ updateHostPrefix(req, "s3", s3AccessPoint)
+}
+
+func updateS3HostForS3ObjectLambda(req *http.Request) {
+ updateHostPrefix(req, "s3", s3ObjectLambda)
+}
+
+func updateHostPrefix(req *http.Request, oldEndpointPrefix, newEndpointPrefix string) {
+ host := req.URL.Host
+ if strings.HasPrefix(host, oldEndpointPrefix) {
+		// e.g. if oldEndpointPrefix is "s3", it is replaced with newEndpointPrefix
+ req.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+ }
+}
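
The prefix swap, standalone, to make the host rewrite concrete:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// swapHostPrefix mirrors updateHostPrefix: replace a leading service label
// while keeping the rest of the host intact.
func swapHostPrefix(u *url.URL, oldPrefix, newPrefix string) {
	if strings.HasPrefix(u.Host, oldPrefix) {
		u.Host = newPrefix + u.Host[len(oldPrefix):]
	}
}

func main() {
	u, _ := url.Parse("https://s3.us-west-2.amazonaws.com")
	swapHostPrefix(u, "s3", "s3-accesspoint")
	fmt.Println(u.Host) // s3-accesspoint.us-west-2.amazonaws.com
}
```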
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
new file mode 100644
index 000000000..f4bbb4b6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/presigned_expires.go
@@ -0,0 +1,49 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddExpiresOnPresignedURL represents a build middleware used to assign
+// expiration on a presigned URL.
+type AddExpiresOnPresignedURL struct {
+
+ // Expires is time.Duration within which presigned url should be expired.
+ // This should be the duration in seconds the presigned URL should be considered valid for.
+ // By default the S3 presigned url expires in 15 minutes ie. 900 seconds.
+ Expires time.Duration
+}
+
+// ID representing the middleware
+func (*AddExpiresOnPresignedURL) ID() string {
+ return "S3:AddExpiresOnPresignedURL"
+}
+
+// HandleBuild handles the build step middleware behavior
+func (m *AddExpiresOnPresignedURL) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	// if expiration is unset, default it rather than skipping the middleware
+	if m.Expires == 0 {
+		// default to 15 * time.Minute, the standard S3 presign lifetime
+ m.Expires = 15 * time.Minute
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", req)
+ }
+
+ // set S3 X-AMZ-Expires header
+ query := req.URL.Query()
+ query.Set("X-Amz-Expires", strconv.FormatInt(int64(m.Expires/time.Second), 10))
+ req.URL.RawQuery = query.Encode()
+
+ return next.HandleBuild(ctx, in)
+}
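
From the caller's side the expiry reaches this middleware through the presign client. A hedged sketch; bucket and key are hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	presigner := s3.NewPresignClient(s3.NewFromConfig(cfg),
		s3.WithPresignExpires(10*time.Minute)) // becomes X-Amz-Expires=600

	req, err := presigner.PresignGetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.URL)
}
```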
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
new file mode 100644
index 000000000..bbc971f2a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go
@@ -0,0 +1,568 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/internal/v4a"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn"
+ s3arn "github.com/aws/aws-sdk-go-v2/service/s3/internal/arn"
+ "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+)
+
+const (
+ s3AccessPoint = "s3-accesspoint"
+ s3ObjectLambda = "s3-object-lambda"
+)
+
+// processARNResource is used to process an ARN resource.
+type processARNResource struct {
+
+ // UseARNRegion indicates if region parsed from an ARN should be used.
+ UseARNRegion bool
+
+ // UseAccelerate indicates if s3 transfer acceleration is enabled
+ UseAccelerate bool
+
+ // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+ EndpointResolver EndpointResolver
+
+ // EndpointResolverOptions used by endpoint resolver
+ EndpointResolverOptions EndpointResolverOptions
+
+ // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled
+ DisableMultiRegionAccessPoints bool
+}
+
+// ID returns the middleware ID.
+func (*processARNResource) ID() string { return "S3:ProcessARNResource" }
+
+func (m *processARNResource) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ // check if arn was provided, if not skip this middleware
+ arnValue, ok := s3shared.GetARNResourceFromContext(ctx)
+ if !ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ // parse arn into an endpoint arn wrt to service
+ resource, err := s3arn.ParseEndpointARN(arnValue)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ // build a resource request struct
+ resourceRequest := s3shared.ResourceRequest{
+ Resource: resource,
+ UseARNRegion: m.UseARNRegion,
+ UseFIPS: m.EndpointResolverOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled,
+ RequestRegion: awsmiddleware.GetRegion(ctx),
+ SigningRegion: awsmiddleware.GetSigningRegion(ctx),
+ PartitionID: awsmiddleware.GetPartitionID(ctx),
+ }
+
+ // switch to correct endpoint updater
+ switch tv := resource.(type) {
+ case arn.AccessPointARN:
+ // multi-region arns do not need to validate for cross partition request
+ if len(tv.Region) != 0 {
+ // validate resource request
+ if err := validateRegionForResourceRequest(resourceRequest); err != nil {
+ return out, metadata, err
+ }
+ }
+
+ // Special handling for region-less ap-arns.
+ if len(tv.Region) == 0 {
+ // check if multi-region arn support is disabled
+ if m.DisableMultiRegionAccessPoints {
+ return out, metadata, fmt.Errorf("Invalid configuration, Multi-Region access point ARNs are disabled")
+ }
+
+ // Do not allow dual-stack configuration with multi-region arns.
+ if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+ return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+ }
+
+ // check if accelerate
+ if m.UseAccelerate {
+ return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // fetch arn region to resolve request
+ resolveRegion := tv.Region
+ // check if request region is FIPS
+ if resourceRequest.UseFIPS && len(resolveRegion) == 0 {
+ // Do not allow Fips support within multi-region arns.
+ return out, metadata, s3shared.NewClientConfiguredForFIPSError(
+ tv, resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ var requestBuilder func(context.Context, accesspointOptions) (context.Context, error)
+ if len(resolveRegion) == 0 {
+ requestBuilder = buildMultiRegionAccessPointsRequest
+ } else {
+ requestBuilder = buildAccessPointRequest
+ }
+
+ // build request as per accesspoint builder
+ ctx, err = requestBuilder(ctx, accesspointOptions{
+ processARNResource: *m,
+ request: req,
+ resource: tv,
+ resolveRegion: resolveRegion,
+ partitionID: resourceRequest.PartitionID,
+ requestRegion: resourceRequest.RequestRegion,
+ })
+ if err != nil {
+ return out, metadata, err
+ }
+
+ case arn.S3ObjectLambdaAccessPointARN:
+ // validate region for resource request
+ if err := validateRegionForResourceRequest(resourceRequest); err != nil {
+ return out, metadata, err
+ }
+
+ // check if accelerate
+ if m.UseAccelerate {
+ return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if dualstack
+ if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+ return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // fetch arn region to resolve request
+ resolveRegion := tv.Region
+
+ // build access point request
+ ctx, err = buildS3ObjectLambdaAccessPointRequest(ctx, accesspointOptions{
+ processARNResource: *m,
+ request: req,
+ resource: tv.AccessPointARN,
+ resolveRegion: resolveRegion,
+ partitionID: resourceRequest.PartitionID,
+ requestRegion: resourceRequest.RequestRegion,
+ })
+ if err != nil {
+ return out, metadata, err
+ }
+
+ // process outpost accesspoint ARN
+ case arn.OutpostAccessPointARN:
+ // validate region for resource request
+ if err := validateRegionForResourceRequest(resourceRequest); err != nil {
+ return out, metadata, err
+ }
+
+ // check if accelerate
+ if m.UseAccelerate {
+ return out, metadata, s3shared.NewClientConfiguredForAccelerateError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if dual stack
+ if m.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+ return out, metadata, s3shared.NewClientConfiguredForDualStackError(tv,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if request region is FIPS
+ if resourceRequest.UseFIPS {
+ return out, metadata, s3shared.NewFIPSConfigurationError(tv, resourceRequest.PartitionID,
+ resourceRequest.RequestRegion, nil)
+ }
+
+ // build outpost access point request
+ ctx, err = buildOutpostAccessPointRequest(ctx, outpostAccessPointOptions{
+ processARNResource: *m,
+ resource: tv,
+ request: req,
+ partitionID: resourceRequest.PartitionID,
+ requestRegion: resourceRequest.RequestRegion,
+ })
+ if err != nil {
+ return out, metadata, err
+ }
+
+ default:
+ return out, metadata, s3shared.NewInvalidARNError(resource, nil)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// validate if s3 resource and request region config is compatible.
+func validateRegionForResourceRequest(resourceRequest s3shared.ResourceRequest) error {
+ // check if resourceRequest leads to a cross partition error
+ v, err := resourceRequest.IsCrossPartition()
+ if err != nil {
+ return err
+ }
+ if v {
+ // if cross partition
+ return s3shared.NewClientPartitionMismatchError(resourceRequest.Resource,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ // check if resourceRequest leads to a cross region error
+ if !resourceRequest.AllowCrossRegion() && resourceRequest.IsCrossRegion() {
+		// cross-region request, and UseARNRegion is not enabled
+ return s3shared.NewClientRegionMismatchError(resourceRequest.Resource,
+ resourceRequest.PartitionID, resourceRequest.RequestRegion, nil)
+ }
+
+ return nil
+}
+
+// === Accesspoint ==========
+
+type accesspointOptions struct {
+ processARNResource
+ request *http.Request
+ resource arn.AccessPointARN
+ resolveRegion string
+ partitionID string
+ requestRegion string
+}
+
+func buildAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+ tv := options.resource
+ req := options.request
+ resolveRegion := options.resolveRegion
+
+ resolveService := tv.Service
+
+ ero := options.EndpointResolverOptions
+ ero.Logger = middleware.GetLogger(ctx)
+ ero.ResolvedRegion = "" // clear endpoint option's resolved region so that we resolve using the passed in region
+
+ // resolve endpoint
+ endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+ if err != nil {
+ return ctx, s3shared.NewFailedToResolveEndpointError(
+ tv,
+ options.partitionID,
+ options.requestRegion,
+ err,
+ )
+ }
+
+ // assign resolved endpoint url to request url
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+		// sign with the service name resolved from the ARN
+ ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+ }
+
+ if len(endpoint.SigningRegion) != 0 {
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ } else {
+ ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+ }
+
+ // update serviceID to "s3-accesspoint"
+ ctx = awsmiddleware.SetServiceID(ctx, s3AccessPoint)
+
+ // disable host prefix behavior
+ ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+ // remove the serialized arn in place of /{Bucket}
+ ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	// skip arn processing, if arn region resolves to an immutable endpoint
+ if endpoint.HostnameImmutable {
+ return ctx, nil
+ }
+
+ updateS3HostForS3AccessPoint(req)
+
+ ctx, err = buildAccessPointHostPrefix(ctx, req, tv)
+ if err != nil {
+ return ctx, err
+ }
+
+ return ctx, nil
+}
+
+func buildS3ObjectLambdaAccessPointRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+ tv := options.resource
+ req := options.request
+ resolveRegion := options.resolveRegion
+
+ resolveService := tv.Service
+
+ ero := options.EndpointResolverOptions
+ ero.Logger = middleware.GetLogger(ctx)
+ ero.ResolvedRegion = "" // clear endpoint options resolved region so we resolve the passed in region
+
+ // resolve endpoint
+ endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+ if err != nil {
+ return ctx, s3shared.NewFailedToResolveEndpointError(
+ tv,
+ options.partitionID,
+ options.requestRegion,
+ err,
+ )
+ }
+
+ // assign resolved endpoint url to request url
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+ // Must sign with s3-object-lambda
+ ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+ }
+
+ if len(endpoint.SigningRegion) != 0 {
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ } else {
+ ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+ }
+
+ // update serviceID to "s3-object-lambda"
+ ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda)
+
+ // disable host prefix behavior
+ ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+ // remove the serialized arn in place of /{Bucket}
+ ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	// skip arn processing, if arn region resolves to an immutable endpoint
+ if endpoint.HostnameImmutable {
+ return ctx, nil
+ }
+
+ if endpoint.Source == aws.EndpointSourceServiceMetadata {
+ updateS3HostForS3ObjectLambda(req)
+ }
+
+ ctx, err = buildAccessPointHostPrefix(ctx, req, tv)
+ if err != nil {
+ return ctx, err
+ }
+
+ return ctx, nil
+}
+
+func buildMultiRegionAccessPointsRequest(ctx context.Context, options accesspointOptions) (context.Context, error) {
+ const s3GlobalLabel = "s3-global."
+ const accesspointLabel = "accesspoint."
+
+ tv := options.resource
+ req := options.request
+ resolveService := tv.Service
+ resolveRegion := options.requestRegion
+ arnPartition := tv.Partition
+
+ // resolve endpoint
+ ero := options.EndpointResolverOptions
+ ero.Logger = middleware.GetLogger(ctx)
+
+ endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+ if err != nil {
+ return ctx, s3shared.NewFailedToResolveEndpointError(
+ tv,
+ options.partitionID,
+ options.requestRegion,
+ err,
+ )
+ }
+
+ // set signing region and version for MRAP
+ endpoint.SigningRegion = "*"
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = SetSignerVersion(ctx, v4a.Version)
+
+ if len(endpoint.SigningName) != 0 {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+ ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+ }
+
+	// skip arn processing, if arn region resolves to an immutable endpoint
+ if endpoint.HostnameImmutable {
+ return ctx, nil
+ }
+
+ // modify endpoint host to use s3-global host prefix
+ scheme := strings.SplitN(endpoint.URL, "://", 2)
+ dnsSuffix, err := endpoints.GetDNSSuffix(arnPartition, ero)
+ if err != nil {
+ return ctx, fmt.Errorf("Error determining dns suffix from arn partition, %w", err)
+ }
+ // set url as per partition
+ endpoint.URL = scheme[0] + "://" + s3GlobalLabel + dnsSuffix
+
+ // assign resolved endpoint url to request url
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ // build access point host prefix
+ accessPointHostPrefix := tv.AccessPointName + "." + accesspointLabel
+
+ // add host prefix to url
+ req.URL.Host = accessPointHostPrefix + req.URL.Host
+ if len(req.Host) > 0 {
+ req.Host = accessPointHostPrefix + req.Host
+ }
+
+ // validate the endpoint host
+ if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+ return ctx, fmt.Errorf("endpoint validation error: %w, when using arn %v", err, tv)
+ }
+
+ // disable host prefix behavior
+ ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+ // remove the serialized arn in place of /{Bucket}
+ ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+ return ctx, nil
+}
+
+func buildAccessPointHostPrefix(ctx context.Context, req *http.Request, tv arn.AccessPointARN) (context.Context, error) {
+ // add host prefix for access point
+ accessPointHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "."
+ req.URL.Host = accessPointHostPrefix + req.URL.Host
+ if len(req.Host) > 0 {
+ req.Host = accessPointHostPrefix + req.Host
+ }
+
+ // validate the endpoint host
+ if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+ return ctx, s3shared.NewInvalidARNError(tv, err)
+ }
+
+ return ctx, nil
+}
+
+// ====== Outpost Accesspoint ========
+
+type outpostAccessPointOptions struct {
+ processARNResource
+ request *http.Request
+ resource arn.OutpostAccessPointARN
+ partitionID string
+ requestRegion string
+}
+
+func buildOutpostAccessPointRequest(ctx context.Context, options outpostAccessPointOptions) (context.Context, error) {
+ tv := options.resource
+ req := options.request
+
+ resolveRegion := tv.Region
+ resolveService := tv.Service
+ endpointsID := resolveService
+ if strings.EqualFold(resolveService, "s3-outposts") {
+		// assign endpoints ID as "s3"
+ endpointsID = "s3"
+ }
+
+ ero := options.EndpointResolverOptions
+ ero.Logger = middleware.GetLogger(ctx)
+ ero.ResolvedRegion = ""
+
+ // resolve regional endpoint for resolved region.
+ endpoint, err := options.EndpointResolver.ResolveEndpoint(resolveRegion, ero)
+ if err != nil {
+ return ctx, s3shared.NewFailedToResolveEndpointError(
+ tv,
+ options.partitionID,
+ options.requestRegion,
+ err,
+ )
+ }
+
+ // assign resolved endpoint url to request url
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return ctx, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ // assign resolved service from arn as signing name
+ if len(endpoint.SigningName) != 0 && endpoint.Source == aws.EndpointSourceCustom {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+ ctx = awsmiddleware.SetSigningName(ctx, resolveService)
+ }
+
+ if len(endpoint.SigningRegion) != 0 {
+ // redirect signer to use resolved endpoint signing name and region
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ } else {
+ ctx = awsmiddleware.SetSigningRegion(ctx, resolveRegion)
+ }
+
+ // update serviceID to resolved service id
+ ctx = awsmiddleware.SetServiceID(ctx, resolveService)
+
+ // disable host prefix behavior
+ ctx = http.DisableEndpointHostPrefix(ctx, true)
+
+ // remove the serialized arn in place of /{Bucket}
+ ctx = setBucketToRemoveOnContext(ctx, tv.String())
+
+	// skip further customizations, if arn region resolves to an immutable endpoint
+ if endpoint.HostnameImmutable {
+ return ctx, nil
+ }
+
+ updateHostPrefix(req, endpointsID, resolveService)
+
+ // add host prefix for s3-outposts
+ outpostAPHostPrefix := tv.AccessPointName + "-" + tv.AccountID + "." + tv.OutpostID + "."
+ req.URL.Host = outpostAPHostPrefix + req.URL.Host
+ if len(req.Host) > 0 {
+ req.Host = outpostAPHostPrefix + req.Host
+ }
+
+ // validate the endpoint host
+ if err := http.ValidateEndpointHost(req.URL.Host); err != nil {
+ return ctx, s3shared.NewInvalidARNError(tv, err)
+ }
+
+ return ctx, nil
+}
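
All of this routing is invisible to callers: an access point ARN simply goes where a bucket name would. A hedged sketch with hypothetical ARN and key:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The ARN is detected, validated against the client region/partition,
	// and rewritten into a virtual-host endpoint by the middleware above.
	_, err = client.GetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
		Key:    aws.String("object-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```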
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
new file mode 100644
index 000000000..cf3f4dc8b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go
@@ -0,0 +1,63 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+)
+
+// removeBucketFromPathMiddleware needs to be executed after the serialize step is performed
+type removeBucketFromPathMiddleware struct {
+}
+
+func (m *removeBucketFromPathMiddleware) ID() string {
+ return "S3:RemoveBucketFromPathMiddleware"
+}
+
+func (m *removeBucketFromPathMiddleware) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ // check if a bucket removal from HTTP path is required
+ bucket, ok := getRemoveBucketFromPath(ctx)
+ if !ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ removeBucketFromPath(req.URL, bucket)
+ return next.HandleSerialize(ctx, in)
+}
+
+type removeBucketKey struct {
+ bucket string
+}
+
+// setBucketToRemoveOnContext sets the bucket name to be removed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setBucketToRemoveOnContext(ctx context.Context, bucket string) context.Context {
+ return middleware.WithStackValue(ctx, removeBucketKey{}, bucket)
+}
+
+// getRemoveBucketFromPath returns the bucket name to remove from the path.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func getRemoveBucketFromPath(ctx context.Context) (string, bool) {
+ v, ok := middleware.GetStackValue(ctx, removeBucketKey{}).(string)
+ return v, ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go
new file mode 100644
index 000000000..6e1d44724
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go
@@ -0,0 +1,88 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+ "net/url"
+)
+
+type s3ObjectLambdaEndpoint struct {
+ // whether the operation should use the s3-object-lambda endpoint
+ UseEndpoint bool
+
+ // use transfer acceleration
+ UseAccelerate bool
+
+ EndpointResolver EndpointResolver
+ EndpointResolverOptions EndpointResolverOptions
+}
+
+func (t *s3ObjectLambdaEndpoint) ID() string {
+ return "S3:ObjectLambdaEndpoint"
+}
+
+func (t *s3ObjectLambdaEndpoint) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ if !t.UseEndpoint {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
+ }
+
+ if t.EndpointResolverOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled {
+ return out, metadata, fmt.Errorf("client configured for dualstack but not supported for operation")
+ }
+
+ if t.UseAccelerate {
+ return out, metadata, fmt.Errorf("client configured for accelerate but not supported for operation")
+ }
+
+ region := awsmiddleware.GetRegion(ctx)
+
+ ero := t.EndpointResolverOptions
+
+ endpoint, err := t.EndpointResolver.ResolveEndpoint(region, ero)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ // Set the ServiceID and SigningName
+ ctx = awsmiddleware.SetServiceID(ctx, s3ObjectLambda)
+
+ if len(endpoint.SigningName) > 0 && endpoint.Source == aws.EndpointSourceCustom {
+ ctx = awsmiddleware.SetSigningName(ctx, endpoint.SigningName)
+ } else {
+ ctx = awsmiddleware.SetSigningName(ctx, s3ObjectLambda)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ if len(endpoint.SigningRegion) > 0 {
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ } else {
+ ctx = awsmiddleware.SetSigningRegion(ctx, region)
+ }
+
+ if endpoint.Source == aws.EndpointSourceServiceMetadata || !endpoint.HostnameImmutable {
+ updateS3HostForS3ObjectLambda(req)
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go
new file mode 100644
index 000000000..756823cb7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go
@@ -0,0 +1,227 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/internal/v4a"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type signerVersionKey struct{}
+
+// GetSignerVersion retrieves the signer version to use for signing
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetSignerVersion(ctx context.Context) (v string) {
+ v, _ = middleware.GetStackValue(ctx, signerVersionKey{}).(string)
+ return v
+}
+
+// SetSignerVersion sets the signer version to be used for signing the request
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSignerVersion(ctx context.Context, version string) context.Context {
+ return middleware.WithStackValue(ctx, signerVersionKey{}, version)
+}
+
+// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware.
+type SignHTTPRequestMiddlewareOptions struct {
+
+ // credential provider
+ CredentialsProvider aws.CredentialsProvider
+
+ // log signing
+ LogSigning bool
+
+ // v4 signer
+ V4Signer v4.HTTPSigner
+
+	// v4a signer
+ V4aSigner v4a.HTTPSigner
+}
+
+// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests
+func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
+ return &SignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ v4Signer: options.V4Signer,
+ v4aSigner: options.V4aSigner,
+ logSigning: options.LogSigning,
+ }
+}
+
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation to select HTTP Signing method
+type SignHTTPRequestMiddleware struct {
+
+ // credential provider
+ credentialsProvider aws.CredentialsProvider
+
+ // log signing
+ logSigning bool
+
+ // v4 signer
+ v4Signer v4.HTTPSigner
+
+	// v4a signer
+ v4aSigner v4a.HTTPSigner
+}
+
+// ID is the SignHTTPRequestMiddleware identifier
+func (s *SignHTTPRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+// HandleFinalize will take the provided input and handle signing for either
+// SigV4 or SigV4A as called for.
+func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ sv := GetSignerVersion(ctx)
+
+ if strings.EqualFold(sv, v4.Version) {
+ mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: s.credentialsProvider,
+ Signer: s.v4Signer,
+ LogSigning: s.logSigning,
+ })
+ return mw.HandleFinalize(ctx, in, next)
+ } else if strings.EqualFold(sv, v4a.Version) {
+ v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider)
+ if !ok {
+ return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer")
+ }
+
+ mw := v4a.NewSignHTTPRequestMiddleware(v4a.SignHTTPRequestMiddlewareOptions{
+ Credentials: v4aCredentialProvider,
+ Signer: s.v4aSigner,
+ LogSigning: s.logSigning,
+ })
+ return mw.HandleFinalize(ctx, in, next)
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already
+// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the
+// finalize step.
+func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignHTTPRequestMiddleware) (err error) {
+ const signedID = "Signing"
+ _, present := stack.Finalize.Get(signedID)
+ if present {
+ _, err = stack.Finalize.Swap(signedID, signingMiddleware)
+ } else {
+ err = stack.Finalize.Add(signingMiddleware, middleware.After)
+ }
+ return err
+}
+
+// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware.
+type PresignHTTPRequestMiddlewareOptions struct {
+ CredentialsProvider aws.CredentialsProvider
+ ExpressCredentials S3ExpressCredentialsProvider
+ V4Presigner v4.HTTPPresigner
+ V4aPresigner v4a.HTTPPresigner
+ LogSigning bool
+}
+
+// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a
+// presigned URL for an HTTP request.
+//
+// Will short circuit the middleware stack and not forward onto the next
+// Finalize handler.
+type PresignHTTPRequestMiddleware struct {
+
+ // cred provider and signer for sigv4
+ credentialsProvider aws.CredentialsProvider
+
+ // s3Express credentials
+ expressCredentials S3ExpressCredentialsProvider
+
+ // sigV4 signer
+ v4Signer v4.HTTPPresigner
+
+ // sigV4a signer
+ v4aSigner v4a.HTTPPresigner
+
+ // log signing
+ logSigning bool
+}
+
+// NewPresignHTTPRequestMiddleware constructs a PresignHTTPRequestMiddleware using the given Signer for signing requests
+func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware {
+ return &PresignHTTPRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ expressCredentials: options.ExpressCredentials,
+ v4Signer: options.V4Presigner,
+ v4aSigner: options.V4aPresigner,
+ logSigning: options.LogSigning,
+ }
+}
+
+// ID provides the middleware ID.
+func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" }
+
+// HandleFinalize will take the provided input and create a presigned url for
+// the http request using the SigV4 or SigV4a presign authentication scheme.
+//
+// Since the presigned request is not valid to send, the stack is short-circuited here and the next Finalize handler is never called.
+func (p *PresignHTTPRequestMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ // fetch signer type from context
+ signerVersion := GetSignerVersion(ctx)
+
+ switch signerVersion {
+ case "aws.auth#sigv4a":
+ mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: &v4a.SymmetricCredentialAdaptor{
+ SymmetricProvider: p.credentialsProvider,
+ },
+ Presigner: p.v4aSigner,
+ LogSigning: p.logSigning,
+ })
+ return mw.HandleFinalize(ctx, in, next)
+ case "aws.auth#sigv4":
+ mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: p.credentialsProvider,
+ Presigner: p.v4Signer,
+ LogSigning: p.logSigning,
+ })
+ return mw.HandleFinalize(ctx, in, next)
+ case s3ExpressSignerVersion:
+ mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: &s3ExpressCredentialsAdapter{
+ provider: p.expressCredentials,
+ bucket: GetBucket(ctx),
+ },
+ Presigner: &s3ExpressPresignerAdapter{v4: p.v4Signer},
+ LogSigning: p.logSigning,
+ })
+ return mw.HandleFinalize(ctx, in, next)
+ default:
+ return out, metadata, fmt.Errorf("unsupported signer type \"%s\"", signerVersion)
+ }
+}
+
+// RegisterPreSigningMiddleware registers the wrapper pre-signing middleware to the stack. If a pre-signing middleware is already
+// present, this provided middleware will be swapped. Otherwise the middleware will be added at the tail of the
+// finalize step.
+func RegisterPreSigningMiddleware(stack *middleware.Stack, signingMiddleware *PresignHTTPRequestMiddleware) (err error) {
+ const signedID = "PresignHTTPRequest"
+ _, present := stack.Finalize.Get(signedID)
+ if present {
+ _, err = stack.Finalize.Swap(signedID, signingMiddleware)
+ } else {
+ err = stack.Finalize.Add(signingMiddleware, middleware.After)
+ }
+ return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
new file mode 100644
index 000000000..eedfc7eef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go
@@ -0,0 +1,310 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/url"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// UpdateEndpointParameterAccessor represents accessor functions used by the middleware
+type UpdateEndpointParameterAccessor struct {
+	// Function pointer used to fetch the bucket name from the provided input.
+	// The function takes an operation input value and returns a pointer to the
+	// bucket name string, and a bool that is false if the input has no bucket
+	// member.
+ GetBucketFromInput func(interface{}) (*string, bool)
+}
+
+// UpdateEndpointOptions provides the options for the UpdateEndpoint middleware setup.
+type UpdateEndpointOptions struct {
+ // Accessor are parameter accessors used by the middleware
+ Accessor UpdateEndpointParameterAccessor
+
+ // use path style
+ UsePathStyle bool
+
+ // use transfer acceleration
+ UseAccelerate bool
+
+ // indicates if an operation supports s3 transfer acceleration.
+ SupportsAccelerate bool
+
+ // use ARN region
+ UseARNRegion bool
+
+ // Indicates that the operation should target the s3-object-lambda endpoint.
+ // Used to direct operations that do not route based on an input ARN.
+ TargetS3ObjectLambda bool
+
+ // EndpointResolver used to resolve endpoints. This may be a custom endpoint resolver
+ EndpointResolver EndpointResolver
+
+ // EndpointResolverOptions used by endpoint resolver
+ EndpointResolverOptions EndpointResolverOptions
+
+ // DisableMultiRegionAccessPoints indicates multi-region access point support is disabled
+ DisableMultiRegionAccessPoints bool
+}
+
+// UpdateEndpoint adds the middleware to the middleware stack based on the UpdateEndpointOptions.
+func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err error) {
+ const serializerID = "OperationSerializer"
+
+ // initial arn look up middleware
+ err = stack.Initialize.Insert(&s3shared.ARNLookup{
+ GetARNValue: options.Accessor.GetBucketFromInput,
+ }, "legacyEndpointContextSetter", middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // process arn
+ err = stack.Serialize.Insert(&processARNResource{
+ UseARNRegion: options.UseARNRegion,
+ UseAccelerate: options.UseAccelerate,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointResolverOptions,
+ DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+ }, serializerID, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // process whether the operation requires the s3-object-lambda endpoint
+ // Occurs before operation serializer so that hostPrefix mutations
+ // can be handled correctly.
+ err = stack.Serialize.Insert(&s3ObjectLambdaEndpoint{
+ UseEndpoint: options.TargetS3ObjectLambda,
+ UseAccelerate: options.UseAccelerate,
+ EndpointResolver: options.EndpointResolver,
+ EndpointResolverOptions: options.EndpointResolverOptions,
+ }, serializerID, middleware.Before)
+ if err != nil {
+ return err
+ }
+
+ // remove bucket arn middleware
+ err = stack.Serialize.Insert(&removeBucketFromPathMiddleware{}, serializerID, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ // update endpoint to use options for path style and accelerate
+ err = stack.Serialize.Insert(&updateEndpoint{
+ usePathStyle: options.UsePathStyle,
+ getBucketFromInput: options.Accessor.GetBucketFromInput,
+ useAccelerate: options.UseAccelerate,
+ supportsAccelerate: options.SupportsAccelerate,
+ }, serializerID, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ return err
+}
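+
+// Illustrative sketch (not part of the SDK source): a minimal option set for
+// a path-style client without transfer acceleration; getBucketFromInput is an
+// assumed operation-specific accessor.
+//
+//	err := UpdateEndpoint(stack, UpdateEndpointOptions{
+//		Accessor: UpdateEndpointParameterAccessor{
+//			GetBucketFromInput: getBucketFromInput,
+//		},
+//		UsePathStyle: true,
+//	})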
+
+type updateEndpoint struct {
+ // path style options
+ usePathStyle bool
+ getBucketFromInput func(interface{}) (*string, bool)
+
+ // accelerate options
+ useAccelerate bool
+ supportsAccelerate bool
+}
+
+// ID returns the middleware ID.
+func (*updateEndpoint) ID() string {
+ return "S3:UpdateEndpoint"
+}
+
+func (u *updateEndpoint) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ // if arn was processed, skip this middleware
+ if _, ok := s3shared.GetARNResourceFromContext(ctx); ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ // skip this customization if host name is set as immutable
+ if smithyhttp.GetHostnameImmutable(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+		// report the actual type of in.Request; req is a nil *smithyhttp.Request here
+		return out, metadata, fmt.Errorf("unknown request type %T", in.Request)
+ }
+
+ // check if accelerate is supported
+ if u.useAccelerate && !u.supportsAccelerate {
+ // accelerate is not supported, thus will be ignored
+ log.Println("Transfer acceleration is not supported for the operation, ignoring UseAccelerate.")
+ u.useAccelerate = false
+ }
+
+ // transfer acceleration is not supported with path style urls
+ if u.useAccelerate && u.usePathStyle {
+ log.Println("UseAccelerate is not compatible with UsePathStyle, ignoring UsePathStyle.")
+ u.usePathStyle = false
+ }
+
+ if u.getBucketFromInput != nil {
+		// The customization below only applies if a bucket name is provided
+ bucket, ok := u.getBucketFromInput(in.Parameters)
+ if ok && bucket != nil {
+ region := awsmiddleware.GetRegion(ctx)
+ if err := u.updateEndpointFromConfig(req, *bucket, region); err != nil {
+ return out, metadata, err
+ }
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+func (u updateEndpoint) updateEndpointFromConfig(req *smithyhttp.Request, bucket string, region string) error {
+ // do nothing if path style is enforced
+ if u.usePathStyle {
+ return nil
+ }
+
+ if !hostCompatibleBucketName(req.URL, bucket) {
+		// The bucket name must be a valid hostname component to be put into the
+		// host for accelerate operations. For non-accelerate operations the
+		// bucket name can stay in the path if it is not a valid hostname.
+ var err error
+ if u.useAccelerate {
+ err = fmt.Errorf("bucket name %s is not compatible with S3", bucket)
+ }
+
+ // No-Op if not using accelerate.
+ return err
+ }
+
+ // accelerate is only supported if use path style is disabled
+ if u.useAccelerate {
+ parts := strings.Split(req.URL.Host, ".")
+ if len(parts) < 3 {
+ return fmt.Errorf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", req.URL.Host)
+ }
+
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+
+ for i := 1; i+1 < len(parts); i++ {
+ if strings.EqualFold(parts[i], region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
+ }
+ }
+
+ // construct the url host
+ req.URL.Host = strings.Join(parts, ".")
+ }
+
+ // move bucket to follow virtual host style
+ moveBucketNameToHost(req.URL, bucket)
+ return nil
+}
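+
+// For example (illustrative): with bucket "mybucket" and region "us-west-2",
+// a host of "s3.us-west-2.amazonaws.com" becomes
+// "mybucket.s3-accelerate.amazonaws.com" when accelerate is enabled, and
+// "mybucket.s3.us-west-2.amazonaws.com" otherwise.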
+
+// updates endpoint to use virtual host styling
+func moveBucketNameToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ removeBucketFromPath(u, bucket)
+}
+
+// remove bucket from url
+func removeBucketFromPath(u *url.URL, bucket string) {
+ if strings.HasPrefix(u.Path, "/"+bucket) {
+ // modify url path
+ u.Path = strings.Replace(u.Path, "/"+bucket, "", 1)
+
+ // modify url raw path
+ u.RawPath = strings.Replace(u.RawPath, "/"+httpbinding.EscapePath(bucket, true), "", 1)
+ }
+
+ if u.Path == "" {
+ u.Path = "/"
+ }
+
+ if u.RawPath == "" {
+ u.RawPath = "/"
+ }
+}
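+
+// For example (illustrative): a path of "/mybucket/photos/cat.jpg" becomes
+// "/photos/cat.jpg" once the bucket "mybucket" is moved into the host.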
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if the bucket is not
+// DNS compatible or the EndpointResolver resolves an aws.Endpoint with
+// HostnameImmutable member set to true.
+//
+// https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Endpoint.HostnameImmutable
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ if strings.Contains(bucket, "..") {
+ return false
+ }
+
+	// checks for `^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$` domain mapping;
+	// ASCII ranges: 'a'-'z' is 97-122, '0'-'9' is 48-57, '.' is 46, '-' is 45
+ if !((bucket[0] > 96 && bucket[0] < 123) || (bucket[0] > 47 && bucket[0] < 58)) {
+ return false
+ }
+
+ for _, c := range bucket[1:] {
+ if !((c > 96 && c < 123) || (c > 47 && c < 58) || c == 46 || c == 45) {
+ return false
+ }
+ }
+
+	// checks for `^(\d+\.){3}\d+$` IP addressing
+	v := strings.Split(bucket, ".")
+	if len(v) == 4 {
+		for _, c := range bucket {
+			if !((c > 47 && c < 58) || c == 46) {
+				// a non-digit, non-dot character confirms this is not an IP address
+				return true
+			}
+		}
+		// this is an IP address
+		return false
+ }
+
+ return true
+}
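+
+// For example (illustrative): "bucket123" and "my-bucket.logs" are DNS
+// compatible, while "My_Bucket", "a..b", and "192.168.0.1" are not.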
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..3364028a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
@@ -0,0 +1,1091 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+ "github.com/aws/smithy-go/logging"
+ "regexp"
+ "strings"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the
+ // provided logger.
+ LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+ // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+ // name. You must not set this value directly in your application.
+ ResolvedRegion string
+
+ // DisableHTTPS informs the resolver to return an endpoint that does not use the
+ // HTTPS scheme.
+ DisableHTTPS bool
+
+ // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+ return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+ return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+ return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+ return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+ return endpoints.Options{
+ Logger: options.Logger,
+ LogDeprecated: options.LogDeprecated,
+ ResolvedRegion: options.ResolvedRegion,
+ DisableHTTPS: options.DisableHTTPS,
+ UseDualStackEndpoint: options.UseDualStackEndpoint,
+ UseFIPSEndpoint: options.UseFIPSEndpoint,
+ }
+}
+
+// Resolver S3 endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := transformToSharedOptions(options)
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
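+
+// Illustrative sketch (not part of the generated code): resolving the default
+// S3 endpoint for a region.
+//
+//	endpoint, err := New().ResolveEndpoint("us-west-2", Options{})
+//	// endpoint.URL is "https://s3.us-west-2.amazonaws.com" for the default
+//	// (non-FIPS, non-dual-stack) variant.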
+
+var partitionRegexp = struct {
+ Aws *regexp.Regexp
+ AwsCn *regexp.Regexp
+ AwsIso *regexp.Regexp
+ AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
+ AwsUsGov *regexp.Regexp
+}{
+
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
+ AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+ AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.Aws,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.af-south-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-east-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-east-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-3",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-south-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-south-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-2",
+ }: endpoints.Endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-southeast-5.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ap-southeast-7.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "aws-global",
+ }: endpoints.Endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.ca-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ca-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.ca-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-central-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-central-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-north-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-north-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-north-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-south-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-south-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-west-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-3",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.eu-west-3.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.ca-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-us-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-us-east-2",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-us-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-us-west-2",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.il-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.me-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "me-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "me-south-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.me-south-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.mx-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "s3-external-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "sa-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "sa-east-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-east-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.us-east-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ }: endpoints.Endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsCn,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn",
+ },
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
+ },
+ },
+ },
+ {
+ ID: "aws-iso",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIso,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-iso-east-1",
+ }: endpoints.Endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-iso-east-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-iso-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-iso-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-iso-west-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+ },
+ endpoints.EndpointKey{
+ Region: "us-iso-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+ },
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.sc2s.sgov.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.sc2s.sgov.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoB,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-isob-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isob-east-1",
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpoints.EndpointKey{
+ Region: "us-isob-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+ },
+ },
+ },
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.csp.hci.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-isof-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isof-south-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3-fips.dualstack.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "s3.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsUsGov,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+}
+
+// GetDNSSuffix returns the dnsSuffix URL component for the given partition id
+func GetDNSSuffix(id string, options Options) (string, error) {
+ variant := transformToSharedOptions(options).GetEndpointVariant()
+ switch {
+ case strings.EqualFold(id, "aws"):
+ switch variant {
+ case endpoints.DualStackVariant:
+ return "amazonaws.com", nil
+
+ case endpoints.FIPSVariant:
+ return "amazonaws.com", nil
+
+ case endpoints.FIPSVariant | endpoints.DualStackVariant:
+ return "amazonaws.com", nil
+
+ case 0:
+ return "amazonaws.com", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ case strings.EqualFold(id, "aws-cn"):
+ switch variant {
+ case endpoints.DualStackVariant:
+ return "amazonaws.com.cn", nil
+
+ case endpoints.FIPSVariant:
+ return "amazonaws.com.cn", nil
+
+ case endpoints.FIPSVariant | endpoints.DualStackVariant:
+ return "api.amazonwebservices.com.cn", nil
+
+ case 0:
+ return "amazonaws.com.cn", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ case strings.EqualFold(id, "aws-iso"):
+ switch variant {
+ case endpoints.FIPSVariant:
+ return "c2s.ic.gov", nil
+
+ case 0:
+ return "c2s.ic.gov", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ case strings.EqualFold(id, "aws-iso-b"):
+ switch variant {
+ case endpoints.FIPSVariant:
+ return "sc2s.sgov.gov", nil
+
+ case 0:
+ return "sc2s.sgov.gov", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ case strings.EqualFold(id, "aws-iso-e"):
+ switch variant {
+ case endpoints.FIPSVariant:
+ return "cloud.adc-e.uk", nil
+
+ case 0:
+ return "cloud.adc-e.uk", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ case strings.EqualFold(id, "aws-iso-f"):
+ switch variant {
+ case endpoints.FIPSVariant:
+ return "csp.hci.ic.gov", nil
+
+ case 0:
+ return "csp.hci.ic.gov", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ case strings.EqualFold(id, "aws-us-gov"):
+ switch variant {
+ case endpoints.DualStackVariant:
+ return "amazonaws.com", nil
+
+ case endpoints.FIPSVariant:
+ return "amazonaws.com", nil
+
+ case endpoints.FIPSVariant | endpoints.DualStackVariant:
+ return "amazonaws.com", nil
+
+ case 0:
+ return "amazonaws.com", nil
+
+ default:
+ return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id)
+
+ }
+
+ default:
+ return "", fmt.Errorf("unknown partition")
+
+ }
+}
+
+// GetDNSSuffixFromRegion returns the DNS suffix for the provided region and
+// options.
+func GetDNSSuffixFromRegion(region string, options Options) (string, error) {
+ switch {
+ case partitionRegexp.Aws.MatchString(region):
+ return GetDNSSuffix("aws", options)
+
+ case partitionRegexp.AwsCn.MatchString(region):
+ return GetDNSSuffix("aws-cn", options)
+
+ case partitionRegexp.AwsIso.MatchString(region):
+ return GetDNSSuffix("aws-iso", options)
+
+ case partitionRegexp.AwsIsoB.MatchString(region):
+ return GetDNSSuffix("aws-iso-b", options)
+
+ case partitionRegexp.AwsIsoE.MatchString(region):
+ return GetDNSSuffix("aws-iso-e", options)
+
+ case partitionRegexp.AwsIsoF.MatchString(region):
+ return GetDNSSuffix("aws-iso-f", options)
+
+ case partitionRegexp.AwsUsGov.MatchString(region):
+ return GetDNSSuffix("aws-us-gov", options)
+
+ default:
+ return GetDNSSuffix("aws", options)
+
+ }
+}
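+
+// For example (illustrative): GetDNSSuffixFromRegion("cn-north-1", Options{})
+// returns "amazonaws.com.cn", while an unrecognized region falls back to the
+// "aws" partition suffix "amazonaws.com".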
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go
new file mode 100644
index 000000000..6f29b807f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go
@@ -0,0 +1,339 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ "github.com/aws/aws-sdk-go-v2/internal/v4a"
+ s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+	// The threshold ContentLength in bytes for an HTTP PUT request to receive the
+	// {Expect: 100-continue} header. Setting it to -1 disables adding the Expect
+	// header to requests; setting it to 0 uses the default threshold of 2 MB.
+ ContinueHeaderThresholdBytes int64
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // clients initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // Disables logging when the client skips output checksum validation due to lack
+ // of algorithm support.
+ DisableLogOutputChecksumValidationSkipped bool
+
+ // Allows you to disable S3 Multi-Region access points feature.
+ DisableMultiRegionAccessPoints bool
+
+ // Disables this client's usage of Session Auth for S3Express buckets and reverts
+ // to using conventional SigV4 for those.
+ DisableS3ExpressSessionAuth *bool
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+	// Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // The credentials provider for S3Express requests.
+ ExpressCredentials ExpressCredentialsProvider
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+	// Indicates whether the user opts in to or out of request checksum calculation.
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+	// Indicates whether the user opts in to or out of response checksum validation.
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+
+ // RetryMaxAttempts specifies the maximum number attempts an API client will call
+ // an operation that fails with a retryable error. A value of 0 is ignored, and
+ // will not be used to configure the API client created default retryer, or modify
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry mode the API client will be created with, if
+ // Retryer option is not also specified.
+ //
+ // When creating a new API Clients this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+ // Allows you to enable arn region support for the service.
+ UseARNRegion bool
+
+	// Allows you to enable the S3 Accelerate feature. All operations compatible
+	// with S3 Accelerate will use the accelerate endpoint for requests; requests
+	// that are not compatible fall back to normal S3 requests. The bucket must
+	// have accelerate enabled before it can be used with an accelerate-enabled
+	// client, otherwise an error will be returned. The bucket name must also be
+	// DNS compatible to work with accelerate.
+ UseAccelerate bool
+
+ // Allows you to enable dual-stack endpoint support for the service.
+ //
+ // Deprecated: Set dual-stack by setting UseDualStackEndpoint on
+ // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint
+ // field is set it overrides this field value.
+ UseDualstack bool
+
+	// Allows you to enable the client to use path-style addressing, i.e.,
+	// https://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will use
+	// virtual hosted bucket addressing when possible
+	// (https://BUCKET.s3.amazonaws.com/KEY).
+ UsePathStyle bool
+
+ // Signature Version 4a (SigV4a) Signer
+ httpSignerV4a httpSignerV4a
+
+ // The initial DefaultsMode used when the client options were constructed. If the
+ // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "com.amazonaws.s3#sigv4express" {
+ return getExpressIdentityResolver(o)
+ }
+ if schemeID == "aws.auth#sigv4a" {
+ return getSigV4AIdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
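+
+// Illustrative sketch (assumption: a loaded aws.Config value named cfg):
+// combining functional options when constructing a client.
+//
+//	client := NewFromConfig(cfg, func(o *Options) {
+//		o.UsePathStyle = true
+//	})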
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
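+
+// Illustrative sketch (not part of the generated code): forcing a signing
+// region at client construction; cfg is an assumed aws.Config.
+//
+//	client := NewFromConfig(cfg, WithSigV4SigningRegion("us-west-2"))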
+
+func getSigV4AIdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &v4a.CredentialsProviderAdapter{
+ Provider: &v4a.SymmetricCredentialAdaptor{
+ SymmetricProvider: o.Credentials,
+ },
+ }
+ }
+ return nil
+}
+
+// WithSigV4ASigningRegions applies an override to the authentication workflow to
+// use the given signing region set for SigV4A-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region set from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4ASigningRegions(regions []string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+ ) {
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, regions)
+ return next.HandleFinalize(ctx, in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Finalize.Insert(
+ middleware.FinalizeMiddlewareFunc("withSigV4ASigningRegions", fn),
+ "Signing",
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
+
+func getExpressIdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.ExpressCredentials != nil {
+ return &s3cust.ExpressIdentityResolver{Provider: o.ExpressCredentials}
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go
new file mode 100644
index 000000000..491ed2e5d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go
@@ -0,0 +1,419 @@
+package s3
+
+import (
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+ algorithmHeader = "X-Amz-Algorithm"
+ credentialHeader = "X-Amz-Credential"
+ dateHeader = "X-Amz-Date"
+ tokenHeader = "X-Amz-Security-Token"
+ signatureHeader = "X-Amz-Signature"
+
+ algorithm = "AWS4-HMAC-SHA256"
+ aws4Request = "aws4_request"
+ bucketHeader = "bucket"
+ defaultExpiresIn = 15 * time.Minute
+ shortDateLayout = "20060102"
+)
+
+// PresignPostObject creates a special kind of [presigned request] used to send a request using
+// form data, likely from an HTML form on a browser.
+// Unlike other presigned operations, the return values of this function are not meant to be used directly
+// to make an HTTP request but rather to be used as inputs to a form. See [the docs] for more information
+// on how to use these values.
+//
+// [presigned request]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html
+// [the docs]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
+func (c *PresignClient) PresignPostObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignPostOptions)) (*PresignedPostRequest, error) {
+ if params == nil {
+ params = &PutObjectInput{}
+ }
+ clientOptions := c.options.copy()
+ options := PresignPostOptions{
+ Expires: clientOptions.Expires,
+ PostPresigner: &postSignAdapter{},
+ }
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(clientOptions.ClientOptions, withNopHTTPClientAPIOption)
+ cvt := presignPostConverter(options)
+ result, _, err := c.client.invokeOperation(ctx, "$type:L", params, clientOptFns,
+ c.client.addOperationPutObjectMiddlewares,
+ cvt.ConvertToPresignMiddleware,
+ func(stack *middleware.Stack, options Options) error {
+ return awshttp.RemoveContentTypeHeader(stack)
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PresignedPostRequest)
+ return out, nil
+}
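+
+// Illustrative sketch (assumptions: an existing *Client named client, and the
+// bucket and key names): generating the form fields for a browser upload.
+//
+//	presigner := NewPresignClient(client)
+//	post, err := presigner.PresignPostObject(ctx, &PutObjectInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Key:    aws.String("uploads/example.txt"),
+//	}, func(o *PresignPostOptions) {
+//		o.Expires = 10 * time.Minute
+//	})
+//	// post.URL is the form action; post.Values become hidden form fields.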
+
+// PresignedPostRequest represents a presigned request to be sent using HTTP verb POST and FormData
+type PresignedPostRequest struct {
+ // Represents the Base URL to make a request to
+ URL string
+	// Values is a key-value map of values to be sent as FormData.
+	// These values are not encoded.
+ Values map[string]string
+}
+
+// postSignAdapter is an adapter implementing the PresignPost interface
+type postSignAdapter struct{}
+
+// PresignPost creates a special kind of [presigned request]
+// to be used with HTTP verb POST.
+// It differs from a presigned PUT request mostly in that:
+// 1. It accepts a new set of parameters, `Conditions[]`, that are used to create a policy document limiting where an object can be posted to.
+// 2. The return value needs more processing, since it is meant to be sent via a form and not stand on its own.
+// 3. There is no body to be signed, since that will be attached when the actual request is made.
+// 4. The signature is based on the policy document, not the whole request.
+// More information can be found at https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
+//
+// [presigned request]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html
+func (s *postSignAdapter) PresignPost(
+ credentials aws.Credentials,
+ bucket string, key string,
+ region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, optFns ...func(*v4.SignerOptions),
+) (fields map[string]string, err error) {
+ credentialScope := buildCredentialScope(signingTime, region, service)
+ credentialStr := credentials.AccessKeyID + "/" + credentialScope
+
+ policyDoc, err := createPolicyDocument(expirationTime, signingTime, bucket, key, credentialStr, &credentials.SessionToken, conditions)
+ if err != nil {
+ return nil, err
+ }
+
+ signature := buildSignature(policyDoc, credentials.SecretAccessKey, service, region, signingTime)
+
+ fields = getPostSignRequiredFields(signingTime, credentialStr, credentials)
+ fields[signatureHeader] = signature
+ fields["key"] = key
+ fields["policy"] = policyDoc
+
+ return fields, nil
+}
+
+func getPostSignRequiredFields(t time.Time, credentialStr string, awsCredentials aws.Credentials) map[string]string {
+ fields := map[string]string{
+ algorithmHeader: algorithm,
+ dateHeader: t.UTC().Format("20060102T150405Z"),
+ credentialHeader: credentialStr,
+ }
+
+ sessionToken := awsCredentials.SessionToken
+ if len(sessionToken) > 0 {
+ fields[tokenHeader] = sessionToken
+ }
+
+ return fields
+}
+
+// PresignPost defines the interface to presign a POST request
+type PresignPost interface {
+ PresignPost(
+ credentials aws.Credentials,
+ bucket string, key string,
+ region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time,
+ optFns ...func(*v4.SignerOptions),
+ ) (fields map[string]string, err error)
+}
+
+// PresignPostOptions represents the options to be passed to a PresignPost sign request.
+type PresignPostOptions struct {
+
+ // ClientOptions are list of functional options to mutate client options used by
+ // the presign client.
+ ClientOptions []func(*Options)
+
+	// PostPresigner to use. One will be created if none is provided.
+ PostPresigner PresignPost
+
+	// Expires sets how long the generated presigned URL is valid. If not set,
+	// or set to zero, the presigned URL defaults to expiring after 900 seconds
+	// (15 minutes).
+ Expires time.Duration
+
+	// Conditions is a list of extra conditions to include in the policy document.
+	// The available conditions are documented [here].
+	//
+	// [here]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions
+ Conditions []interface{}
+}
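+
+// For example, a sketch with placeholder values: the options can scope
+// uploads to a key prefix and shorten the expiry.
+//
+//	func(o *s3.PresignPostOptions) {
+//		o.Expires = 5 * time.Minute
+//		o.Conditions = []interface{}{
+//			[]interface{}{"starts-with", "$key", "uploads/"},
+//		}
+//	}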
+
+type presignPostConverter PresignPostOptions
+
+// presignPostRequestMiddlewareOptions is the options for the presignPostRequestMiddleware middleware.
+type presignPostRequestMiddlewareOptions struct {
+ CredentialsProvider aws.CredentialsProvider
+ Presigner PresignPost
+ LogSigning bool
+ ExpiresIn time.Duration
+ Conditions []interface{}
+}
+
+type presignPostRequestMiddleware struct {
+ credentialsProvider aws.CredentialsProvider
+ presigner PresignPost
+ logSigning bool
+ expiresIn time.Duration
+ conditions []interface{}
+}
+
+// newPresignPostRequestMiddleware returns a new presignPostRequestMiddleware
+// initialized with the presigner.
+func newPresignPostRequestMiddleware(options presignPostRequestMiddlewareOptions) *presignPostRequestMiddleware {
+ return &presignPostRequestMiddleware{
+ credentialsProvider: options.CredentialsProvider,
+ presigner: options.Presigner,
+ logSigning: options.LogSigning,
+ expiresIn: options.ExpiresIn,
+ conditions: options.Conditions,
+ }
+}
+
+// ID provides the middleware ID.
+func (*presignPostRequestMiddleware) ID() string { return "PresignPostRequestMiddleware" }
+
+// HandleFinalize takes the provided input and creates a presigned URL for
+// the HTTP request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid, standalone HTTP request, this
+// middleware does not forward it; it returns the presigned URL and form
+// fields as the operation result instead.
+func (s *presignPostRequestMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+
+ input := getOperationInput(ctx)
+ asS3Put, ok := input.(*PutObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expected PutObjectInput")
+ }
+ bucketName, ok := asS3Put.bucket()
+ if !ok {
+ return out, metadata, fmt.Errorf("requested input bucketName not found on request")
+ }
+ uploadKey := asS3Put.Key
+ if uploadKey == nil {
+ return out, metadata, fmt.Errorf("PutObject input does not have a key input")
+ }
+
+ uri := getS3ResolvedURI(ctx)
+
+ signingName := awsmiddleware.GetSigningName(ctx)
+ signingRegion := awsmiddleware.GetSigningRegion(ctx)
+
+ credentials, err := s.credentialsProvider.Retrieve(ctx)
+ if err != nil {
+ return out, metadata, &v4.SigningError{
+ Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+ }
+ }
+ skew := internalcontext.GetAttemptSkewContext(ctx)
+ signingTime := sdk.NowTime().Add(skew)
+ expirationTime := signingTime.Add(s.expiresIn).UTC()
+
+ fields, err := s.presigner.PresignPost(
+ credentials,
+ bucketName,
+ *uploadKey,
+ signingRegion,
+ signingName,
+ signingTime,
+ s.conditions,
+ expirationTime,
+ func(o *v4.SignerOptions) {
+ o.Logger = middleware.GetLogger(ctx)
+ o.LogSigning = s.logSigning
+ })
+ if err != nil {
+ return out, metadata, &v4.SigningError{
+ Err: fmt.Errorf("failed to sign http request, %w", err),
+ }
+ }
+
+ out.Result = &PresignedPostRequest{
+ URL: uri,
+ Values: fields,
+ }
+
+ return out, metadata, nil
+}
+
+// Adapted from existing PresignConverter middleware
+func (c presignPostConverter) ConvertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+ stack.Build.Remove("UserAgent")
+ stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
+ stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+ stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+ stack.Deserialize.Clear()
+
+ if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil {
+ return err
+ }
+
+ // if no expiration is set, set one
+ expiresIn := c.Expires
+ if expiresIn == 0 {
+ expiresIn = defaultExpiresIn
+ }
+
+ pmw := newPresignPostRequestMiddleware(presignPostRequestMiddlewareOptions{
+ CredentialsProvider: options.Credentials,
+ Presigner: c.PostPresigner,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ ExpiresIn: expiresIn,
+ Conditions: c.Conditions,
+ })
+ if _, err := stack.Finalize.Swap("Signing", pmw); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
+ return err
+ }
+ err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func createPolicyDocument(expirationTime time.Time, signingTime time.Time, bucket string, key string, credentialString string, securityToken *string, extraConditions []interface{}) (string, error) {
+ initialConditions := []interface{}{
+ map[string]string{
+ algorithmHeader: algorithm,
+ },
+ map[string]string{
+ bucketHeader: bucket,
+ },
+ map[string]string{
+ credentialHeader: credentialString,
+ },
+ map[string]string{
+ dateHeader: signingTime.UTC().Format("20060102T150405Z"),
+ },
+ }
+
+ var conditions []interface{}
+ for _, v := range initialConditions {
+ conditions = append(conditions, v)
+ }
+
+ if securityToken != nil && *securityToken != "" {
+ conditions = append(conditions, map[string]string{
+ tokenHeader: *securityToken,
+ })
+ }
+
+ // append user-defined conditions at the end
+ conditions = append(conditions, extraConditions...)
+
+	// The policy can carry a "key" condition that restricts the name of the
+	// object being posted. Callers can supply one through their extra
+	// conditions, so check whether one has already been set.
+	// If none is found, restrict uploads to just the key passed on the
+	// request. This restriction can be lifted by adding a condition that
+	// explicitly allows everything.
+ if !isAlreadyCheckingForKey(conditions) {
+ conditions = append(conditions, map[string]string{"key": key})
+ }
+
+ policyDoc := map[string]interface{}{
+ "conditions": conditions,
+ "expiration": expirationTime.Format(time.RFC3339),
+ }
+
+ jsonBytes, err := json.Marshal(policyDoc)
+ if err != nil {
+ return "", err
+ }
+
+ return base64.StdEncoding.EncodeToString(jsonBytes), nil
+}
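+
+// For illustration only, the policy document built above (before base64
+// encoding) looks roughly like the following; all values are placeholders
+// and the exact condition keys come from this package's header constants:
+//
+//	{
+//	  "conditions": [
+//	    {"X-Amz-Algorithm": "AWS4-HMAC-SHA256"},
+//	    {"bucket": "example-bucket"},
+//	    {"X-Amz-Credential": "AKID/20240101/us-east-1/s3/aws4_request"},
+//	    {"X-Amz-Date": "20240101T000000Z"},
+//	    {"key": "uploads/photo.png"}
+//	  ],
+//	  "expiration": "2024-01-01T00:15:00Z"
+//	}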
+
+func isAlreadyCheckingForKey(conditions []interface{}) bool {
+ // Need to check for two conditions:
+ // 1. A condition of the form ["starts-with", "$key", "mykey"]
+ // 2. A condition of the form {"key": "mykey"}
+ for _, c := range conditions {
+ slice, ok := c.([]interface{})
+ if ok && len(slice) > 1 {
+ if slice[0] == "starts-with" && slice[1] == "$key" {
+ return true
+ }
+ }
+ m, ok := c.(map[string]interface{})
+ if ok && len(m) > 0 {
+ for k := range m {
+ if k == "key" {
+ return true
+ }
+ }
+ }
+		// Repeat the check for map[string]string due to type constraints
+ ms, ok := c.(map[string]string)
+ if ok && len(ms) > 0 {
+ for k := range ms {
+ if k == "key" {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
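+
+// Both shapes checked above would be detected; for example (hypothetical
+// values):
+//
+//	[]interface{}{"starts-with", "$key", "uploads/"}
+//	map[string]string{"key": "uploads/photo.png"}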
+
+// These helpers are copied from the v4 signer implementation, since they are
+// not exported for public use.
+func hmacsha256(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func buildSignature(strToSign, secret, service, region string, t time.Time) string {
+ key := deriveKey(secret, service, region, t)
+ return hex.EncodeToString(hmacsha256(key, []byte(strToSign)))
+}
+
+func deriveKey(secret, service, region string, t time.Time) []byte {
+ hmacDate := hmacsha256([]byte("AWS4"+secret), []byte(t.UTC().Format(shortDateLayout)))
+ hmacRegion := hmacsha256(hmacDate, []byte(region))
+ hmacService := hmacsha256(hmacRegion, []byte(service))
+ return hmacsha256(hmacService, []byte(aws4Request))
+}
+
+func buildCredentialScope(signingTime time.Time, region, service string) string {
+ return strings.Join([]string{
+ signingTime.UTC().Format(shortDateLayout),
+ region,
+ service,
+ aws4Request,
+ }, "/")
+}
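+
+// Worked example of the scope and key derivation above, with assumed inputs
+// (date 2024-01-01, region us-east-1, service s3):
+//
+//	scope     = "20240101/us-east-1/s3/aws4_request"
+//	kDate     = HMAC-SHA256("AWS4"+secret, "20240101")
+//	kRegion   = HMAC-SHA256(kDate, "us-east-1")
+//	kService  = HMAC-SHA256(kRegion, "s3")
+//	kSigning  = HMAC-SHA256(kService, "aws4_request")
+//	signature = hex(HMAC-SHA256(kSigning, policyDocument))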
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go
new file mode 100644
index 000000000..4e34d1a22
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go
@@ -0,0 +1,77 @@
+package s3
+
+import (
+ "context"
+ "fmt"
+ "path"
+
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// serializeImmutableHostnameBucketMiddleware handles injecting the bucket name into
+// "immutable" hostnames resolved via v1 EndpointResolvers. This CANNOT be done in
+// serialization, since v2 endpoint resolution requires removing the {Bucket} path
+// segment from all S3 requests.
+//
+// This will only be done for non-ARN buckets, as the features that use those require
+// virtualhost manipulation to function and we previously (pre-ep2) expected the caller
+// to handle that in their resolver.
+type serializeImmutableHostnameBucketMiddleware struct {
+ UsePathStyle bool
+}
+
+func (*serializeImmutableHostnameBucketMiddleware) ID() string {
+ return "serializeImmutableHostnameBucket"
+}
+
+func (m *serializeImmutableHostnameBucketMiddleware) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ bucket, ok := bucketFromInput(in.Parameters)
+ if !ok {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ // a bucket being un-vhostable will also force us to use path style
+ usePathStyle := m.UsePathStyle || !awsrulesfn.IsVirtualHostableS3Bucket(bucket, request.URL.Scheme != "https")
+
+ if !smithyhttp.GetHostnameImmutable(ctx) &&
+ !(awsmiddleware.GetRequiresLegacyEndpoints(ctx) && usePathStyle) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ parsedBucket := awsrulesfn.ParseARN(bucket)
+
+	// disallow ARN buckets except for MRAP ARNs
+ if parsedBucket != nil && len(parsedBucket.Region) > 0 {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ request.URL.Path = path.Join(request.URL.Path, bucket)
+ request.URL.RawPath = path.Join(request.URL.RawPath, httpbinding.EscapePath(bucket, true))
+
+ return next.HandleSerialize(ctx, in)
+}
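+
+// Sketch of the effect, with a hypothetical endpoint: given an immutable
+// custom endpoint and path-style addressing, a request for bucket "photos"
+// gains the bucket as a path segment, e.g.
+// https://minio.internal:9000/photos/{Key}. One way to opt in via the
+// deprecated v1 resolver (endpoint URL is a placeholder):
+//
+//	client := s3.New(s3.Options{
+//		UsePathStyle: true,
+//		EndpointResolver: s3.EndpointResolverFromURL(
+//			"https://minio.internal:9000",
+//			func(e *aws.Endpoint) { e.HostnameImmutable = true },
+//		),
+//	})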
+
+func addSerializeImmutableHostnameBucketMiddleware(stack *middleware.Stack, options Options) error {
+ return stack.Serialize.Insert(
+ &serializeImmutableHostnameBucketMiddleware{
+ UsePathStyle: options.UsePathStyle,
+ },
+ "OperationSerializer",
+ middleware.Before,
+ )
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
new file mode 100644
index 000000000..c37fab1e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
@@ -0,0 +1,13910 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+type awsRestxml_serializeOpAbortMultipartUpload struct {
+}
+
+func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AbortMultipartUploadInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=AbortMultipartUpload")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.IfMatchInitiatedTime != nil {
+ locationName := "X-Amz-If-Match-Initiated-Time"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchInitiatedTime))
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
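+
+// For illustration, with hypothetical values: an AbortMultipartUpload for
+// key "uploads/photo.png" and upload ID "abc123" serializes roughly to:
+//
+//	DELETE /uploads/photo.png?x-id=AbortMultipartUpload&uploadId=abc123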
+
+type awsRestxml_serializeOpCompleteMultipartUpload struct {
+}
+
+func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CompleteMultipartUploadInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.MultipartUpload != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CompleteMultipartUpload",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentCompletedMultipartUpload(input.MultipartUpload, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ChecksumCRC32 != nil {
+ locationName := "X-Amz-Checksum-Crc32"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32)
+ }
+
+ if v.ChecksumCRC32C != nil {
+ locationName := "X-Amz-Checksum-Crc32c"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32C)
+ }
+
+ if v.ChecksumCRC64NVME != nil {
+ locationName := "X-Amz-Checksum-Crc64nvme"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME)
+ }
+
+ if v.ChecksumSHA1 != nil {
+ locationName := "X-Amz-Checksum-Sha1"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA1)
+ }
+
+ if v.ChecksumSHA256 != nil {
+ locationName := "X-Amz-Checksum-Sha256"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA256)
+ }
+
+ if len(v.ChecksumType) > 0 {
+ locationName := "X-Amz-Checksum-Type"
+ encoder.SetHeader(locationName).String(string(v.ChecksumType))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.IfMatch != nil {
+ locationName := "If-Match"
+ encoder.SetHeader(locationName).String(*v.IfMatch)
+ }
+
+ if v.IfNoneMatch != nil {
+ locationName := "If-None-Match"
+ encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.MpuObjectSize != nil {
+ locationName := "X-Amz-Mp-Object-Size"
+ encoder.SetHeader(locationName).Long(*v.MpuObjectSize)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
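+
+// For illustration, with hypothetical values: CompleteMultipartUpload posts
+// the completed part list as an XML payload, e.g.
+//
+//	POST /uploads/photo.png?uploadId=abc123
+//	Content-Type: application/xml
+//
+//	<CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+//	  <Part><ETag>"etag-1"</ETag><PartNumber>1</PartNumber></Part>
+//	</CompleteMultipartUpload>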
+
+type awsRestxml_serializeOpCopyObject struct {
+}
+
+func (*awsRestxml_serializeOpCopyObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CopyObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CopyObject")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCopyObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.BucketKeyEnabled != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
+ }
+
+ if v.CacheControl != nil {
+ locationName := "Cache-Control"
+ encoder.SetHeader(locationName).String(*v.CacheControl)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentDisposition != nil {
+ locationName := "Content-Disposition"
+ encoder.SetHeader(locationName).String(*v.ContentDisposition)
+ }
+
+ if v.ContentEncoding != nil {
+ locationName := "Content-Encoding"
+ encoder.SetHeader(locationName).String(*v.ContentEncoding)
+ }
+
+ if v.ContentLanguage != nil {
+ locationName := "Content-Language"
+ encoder.SetHeader(locationName).String(*v.ContentLanguage)
+ }
+
+ if v.ContentType != nil {
+ locationName := "Content-Type"
+ encoder.SetHeader(locationName).String(*v.ContentType)
+ }
+
+ if v.CopySource != nil {
+ locationName := "X-Amz-Copy-Source"
+ encoder.SetHeader(locationName).String(*v.CopySource)
+ }
+
+ if v.CopySourceIfMatch != nil {
+ locationName := "X-Amz-Copy-Source-If-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfMatch)
+ }
+
+ if v.CopySourceIfModifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince))
+ }
+
+ if v.CopySourceIfNoneMatch != nil {
+ locationName := "X-Amz-Copy-Source-If-None-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch)
+ }
+
+ if v.CopySourceIfUnmodifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince))
+ }
+
+ if v.CopySourceSSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm)
+ }
+
+ if v.CopySourceSSECustomerKey != nil {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey)
+ }
+
+ if v.CopySourceSSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.ExpectedSourceBucketOwner != nil {
+ locationName := "X-Amz-Source-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner)
+ }
+
+ if v.Expires != nil {
+ locationName := "Expires"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+ }
+
+ if v.GrantFullControl != nil {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWriteACP != nil {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.Metadata != nil {
+ hv := encoder.Headers("X-Amz-Meta-")
+ for mapKey, mapVal := range v.Metadata {
+ hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+ }
+ }
+
+ if len(v.MetadataDirective) > 0 {
+ locationName := "X-Amz-Metadata-Directive"
+ encoder.SetHeader(locationName).String(string(v.MetadataDirective))
+ }
+
+ if len(v.ObjectLockLegalHoldStatus) > 0 {
+ locationName := "X-Amz-Object-Lock-Legal-Hold"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+ }
+
+ if len(v.ObjectLockMode) > 0 {
+ locationName := "X-Amz-Object-Lock-Mode"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+ }
+
+ if v.ObjectLockRetainUntilDate != nil {
+ locationName := "X-Amz-Object-Lock-Retain-Until-Date"
+ encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.SSEKMSEncryptionContext != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Context"
+ encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+ }
+
+ if v.SSEKMSKeyId != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ if len(v.StorageClass) > 0 {
+ locationName := "X-Amz-Storage-Class"
+ encoder.SetHeader(locationName).String(string(v.StorageClass))
+ }
+
+ if v.Tagging != nil {
+ locationName := "X-Amz-Tagging"
+ encoder.SetHeader(locationName).String(*v.Tagging)
+ }
+
+ if len(v.TaggingDirective) > 0 {
+ locationName := "X-Amz-Tagging-Directive"
+ encoder.SetHeader(locationName).String(string(v.TaggingDirective))
+ }
+
+ if v.WebsiteRedirectLocation != nil {
+ locationName := "X-Amz-Website-Redirect-Location"
+ encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCreateBucket struct {
+}
+
+func (*awsRestxml_serializeOpCreateBucket) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateBucketInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCreateBucketInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.CreateBucketConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CreateBucketConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentCreateBucketConfiguration(input.CreateBucketConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.GrantFullControl != nil {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWrite != nil {
+ locationName := "X-Amz-Grant-Write"
+ encoder.SetHeader(locationName).String(*v.GrantWrite)
+ }
+
+ if v.GrantWriteACP != nil {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.ObjectLockEnabledForBucket != nil {
+ locationName := "X-Amz-Bucket-Object-Lock-Enabled"
+ encoder.SetHeader(locationName).Boolean(*v.ObjectLockEnabledForBucket)
+ }
+
+ if len(v.ObjectOwnership) > 0 {
+ locationName := "X-Amz-Object-Ownership"
+ encoder.SetHeader(locationName).String(string(v.ObjectOwnership))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCreateBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metadataTable")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.MetadataTableConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MetadataTableConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentMetadataTableConfiguration(input.MetadataTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCreateMultipartUpload struct {
+}
+
+func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateMultipartUploadInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.BucketKeyEnabled != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
+ }
+
+ if v.CacheControl != nil {
+ locationName := "Cache-Control"
+ encoder.SetHeader(locationName).String(*v.CacheControl)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if len(v.ChecksumType) > 0 {
+ locationName := "X-Amz-Checksum-Type"
+ encoder.SetHeader(locationName).String(string(v.ChecksumType))
+ }
+
+ if v.ContentDisposition != nil {
+ locationName := "Content-Disposition"
+ encoder.SetHeader(locationName).String(*v.ContentDisposition)
+ }
+
+ if v.ContentEncoding != nil {
+ locationName := "Content-Encoding"
+ encoder.SetHeader(locationName).String(*v.ContentEncoding)
+ }
+
+ if v.ContentLanguage != nil {
+ locationName := "Content-Language"
+ encoder.SetHeader(locationName).String(*v.ContentLanguage)
+ }
+
+ if v.ContentType != nil {
+ locationName := "Content-Type"
+ encoder.SetHeader(locationName).String(*v.ContentType)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Expires != nil {
+ locationName := "Expires"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+ }
+
+ if v.GrantFullControl != nil {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWriteACP != nil {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.Metadata != nil {
+ hv := encoder.Headers("X-Amz-Meta-")
+ for mapKey, mapVal := range v.Metadata {
+ hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+ }
+ }
+
+ if len(v.ObjectLockLegalHoldStatus) > 0 {
+ locationName := "X-Amz-Object-Lock-Legal-Hold"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+ }
+
+ if len(v.ObjectLockMode) > 0 {
+ locationName := "X-Amz-Object-Lock-Mode"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+ }
+
+ if v.ObjectLockRetainUntilDate != nil {
+ locationName := "X-Amz-Object-Lock-Retain-Until-Date"
+ encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.SSEKMSEncryptionContext != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Context"
+ encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+ }
+
+ if v.SSEKMSKeyId != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ if len(v.StorageClass) > 0 {
+ locationName := "X-Amz-Storage-Class"
+ encoder.SetHeader(locationName).String(string(v.StorageClass))
+ }
+
+ if v.Tagging != nil {
+ locationName := "X-Amz-Tagging"
+ encoder.SetHeader(locationName).String(*v.Tagging)
+ }
+
+ if v.WebsiteRedirectLocation != nil {
+ locationName := "X-Amz-Website-Redirect-Location"
+ encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpCreateSession struct {
+}
+
+func (*awsRestxml_serializeOpCreateSession) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateSessionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?session")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsCreateSessionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.BucketKeyEnabled != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if len(v.SessionMode) > 0 {
+ locationName := "X-Amz-Create-Session-Mode"
+ encoder.SetHeader(locationName).String(string(v.SessionMode))
+ }
+
+ if v.SSEKMSEncryptionContext != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Context"
+ encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+ }
+
+ if v.SSEKMSKeyId != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucket struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucket) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?analytics")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketCors) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketCorsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?cors")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketEncryptionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?encryption")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?inventory")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketLifecycle struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketLifecycleInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?lifecycle")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metadataTable")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metrics")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?ownershipControls")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketPolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?policy")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketReplicationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?replication")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketTagging struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?tagging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteBucketWebsite struct {
+}
+
+func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBucketWebsiteInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?website")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteObject struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=DeleteObject")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.BypassGovernanceRetention != nil {
+ locationName := "X-Amz-Bypass-Governance-Retention"
+ encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.IfMatch != nil {
+ locationName := "If-Match"
+ encoder.SetHeader(locationName).String(*v.IfMatch)
+ }
+
+ if v.IfMatchLastModifiedTime != nil {
+ locationName := "X-Amz-If-Match-Last-Modified-Time"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchLastModifiedTime))
+ }
+
+ if v.IfMatchSize != nil {
+ locationName := "X-Amz-If-Match-Size"
+ encoder.SetHeader(locationName).Long(*v.IfMatchSize)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.MFA != nil {
+ locationName := "X-Amz-Mfa"
+ encoder.SetHeader(locationName).String(*v.MFA)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteObjects struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObjects) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteObjectsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?delete")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.Delete != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Delete",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentDelete(input.Delete, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.BypassGovernanceRetention != nil {
+ locationName := "X-Amz-Bypass-Governance-Retention"
+ encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.MFA != nil {
+ locationName := "X-Amz-Mfa"
+ encoder.SetHeader(locationName).String(*v.MFA)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeleteObjectTagging struct {
+}
+
+func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteObjectTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpDeletePublicAccessBlock struct {
+}
+
+func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeletePublicAccessBlockInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "DELETE"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?accelerate")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketAcl struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketAcl) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?acl")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=GetBucketAnalyticsConfiguration")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketCors) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketCorsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?cors")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketEncryption) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketEncryptionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?encryption")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=GetBucketInventoryConfiguration")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?lifecycle")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketLocation struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLocation) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketLocationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?location")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketLogging struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketLogging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketLoggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?logging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketMetadataTableConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metadataTable")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=GetBucketMetricsConfiguration")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?notification")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketOwnershipControlsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?ownershipControls")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketPolicy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketPolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?policy")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketPolicyStatus struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketPolicyStatusInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?policyStatus")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketReplication) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketReplicationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?replication")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketRequestPayment struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketRequestPaymentInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?requestPayment")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketTagging struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?tagging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketVersioning struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketVersioning) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketVersioningInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?versioning")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetBucketWebsite struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketWebsite) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetBucketWebsiteInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?website")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObject struct {
+}
+
+func (*awsRestxml_serializeOpGetObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=GetObject")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumMode) > 0 {
+ locationName := "X-Amz-Checksum-Mode"
+ encoder.SetHeader(locationName).String(string(v.ChecksumMode))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.IfMatch != nil {
+ locationName := "If-Match"
+ encoder.SetHeader(locationName).String(*v.IfMatch)
+ }
+
+ if v.IfModifiedSince != nil {
+ locationName := "If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince))
+ }
+
+ if v.IfNoneMatch != nil {
+ locationName := "If-None-Match"
+ encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+ }
+
+ if v.IfUnmodifiedSince != nil {
+ locationName := "If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince))
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.PartNumber != nil {
+ encoder.SetQuery("partNumber").Integer(*v.PartNumber)
+ }
+
+ if v.Range != nil {
+ locationName := "Range"
+ encoder.SetHeader(locationName).String(*v.Range)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.ResponseCacheControl != nil {
+ encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl)
+ }
+
+ if v.ResponseContentDisposition != nil {
+ encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition)
+ }
+
+ if v.ResponseContentEncoding != nil {
+ encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding)
+ }
+
+ if v.ResponseContentLanguage != nil {
+ encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage)
+ }
+
+ if v.ResponseContentType != nil {
+ encoder.SetQuery("response-content-type").String(*v.ResponseContentType)
+ }
+
+ if v.ResponseExpires != nil {
+ encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectAcl struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectAcl) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectAttributes struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectAttributes) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectAttributesInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?attributes")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.MaxParts != nil {
+ locationName := "X-Amz-Max-Parts"
+ encoder.SetHeader(locationName).Integer(*v.MaxParts)
+ }
+
+ if v.ObjectAttributes != nil {
+ locationName := "X-Amz-Object-Attributes"
+ if len(v.ObjectAttributes) == 0 {
+ encoder.AddHeader(locationName).String("")
+ }
+ for i := range v.ObjectAttributes {
+ if len(v.ObjectAttributes[i]) > 0 {
+ escaped := string(v.ObjectAttributes[i])
+ if strings.Index(string(v.ObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.ObjectAttributes[i]), `"`) != -1 {
+ escaped = strconv.Quote(string(v.ObjectAttributes[i]))
+ }
+
+ encoder.AddHeader(locationName).String(string(escaped))
+ }
+ }
+ }
+
+ if v.PartNumberMarker != nil {
+ locationName := "X-Amz-Part-Number-Marker"
+ encoder.SetHeader(locationName).String(*v.PartNumberMarker)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectLegalHold struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectLegalHoldInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectLockConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?object-lock")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectRetention struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectRetention) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectRetentionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectTagging struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetObjectTorrent struct {
+}
+
+func (*awsRestxml_serializeOpGetObjectTorrent) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetObjectTorrentInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?torrent")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpGetPublicAccessBlock struct {
+}
+
+func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetPublicAccessBlockInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpHeadBucket struct {
+}
+
+func (*awsRestxml_serializeOpHeadBucket) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*HeadBucketInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "HEAD"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsHeadBucketInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpHeadObject struct {
+}
+
+func (*awsRestxml_serializeOpHeadObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*HeadObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "HEAD"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsHeadObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumMode) > 0 {
+ locationName := "X-Amz-Checksum-Mode"
+ encoder.SetHeader(locationName).String(string(v.ChecksumMode))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.IfMatch != nil {
+ locationName := "If-Match"
+ encoder.SetHeader(locationName).String(*v.IfMatch)
+ }
+
+ if v.IfModifiedSince != nil {
+ locationName := "If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince))
+ }
+
+ if v.IfNoneMatch != nil {
+ locationName := "If-None-Match"
+ encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+ }
+
+ if v.IfUnmodifiedSince != nil {
+ locationName := "If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfUnmodifiedSince))
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.PartNumber != nil {
+ encoder.SetQuery("partNumber").Integer(*v.PartNumber)
+ }
+
+ if v.Range != nil {
+ locationName := "Range"
+ encoder.SetHeader(locationName).String(*v.Range)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.ResponseCacheControl != nil {
+ encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl)
+ }
+
+ if v.ResponseContentDisposition != nil {
+ encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition)
+ }
+
+ if v.ResponseContentEncoding != nil {
+ encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding)
+ }
+
+ if v.ResponseContentLanguage != nil {
+ encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage)
+ }
+
+ if v.ResponseContentType != nil {
+ encoder.SetQuery("response-content-type").String(*v.ResponseContentType)
+ }
+
+ if v.ResponseExpires != nil {
+ encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
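+// Usage sketch (editorial comment; values are placeholders, client/ctx as in the
+// earlier sketch): HeadObjectInput's conditional fields map one-to-one onto the
+// headers serialized above (If-None-Match, Range, and friends).
+//
+//	res, err := client.HeadObject(ctx, &s3.HeadObjectInput{
+//		Bucket:      aws.String("example-bucket"),
+//		Key:         aws.String("example/key"),
+//		IfNoneMatch: aws.String(`"cached-etag"`), // conditional fetch, as in HTTP caching
+//		Range:       aws.String("bytes=0-1023"),  // serialized as the Range header
+//	})
+//	if err == nil {
+//		fmt.Println(aws.ToInt64(res.ContentLength))
+//	}
+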
+type awsRestxml_serializeOpListBucketAnalyticsConfigurations struct {
+}
+
+func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=ListBucketAnalyticsConfigurations")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListBucketIntelligentTieringConfigurations struct {
+}
+
+func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListBucketInventoryConfigurations struct {
+}
+
+func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=ListBucketInventoryConfigurations")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListBucketMetricsConfigurations struct {
+}
+
+func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=ListBucketMetricsConfigurations")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListBuckets struct {
+}
+
+func (*awsRestxml_serializeOpListBuckets) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListBucketsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?x-id=ListBuckets")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListBucketsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.BucketRegion != nil {
+ encoder.SetQuery("bucket-region").String(*v.BucketRegion)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.MaxBuckets != nil {
+ encoder.SetQuery("max-buckets").Integer(*v.MaxBuckets)
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ return nil
+}
+
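+// Usage sketch (editorial comment; placeholders throughout, client/ctx as above):
+// ListBucketsInput's paging fields all travel as query parameters per the bindings
+// above.
+//
+//	page, err := client.ListBuckets(ctx, &s3.ListBucketsInput{
+//		MaxBuckets: aws.Int32(100),      // ?max-buckets=100
+//		Prefix:     aws.String("prod-"), // ?prefix=prod-
+//	})
+//	_, _ = page, err
+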
+type awsRestxml_serializeOpListDirectoryBuckets struct {
+}
+
+func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListDirectoryBucketsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?x-id=ListDirectoryBuckets")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.MaxDirectoryBuckets != nil {
+ encoder.SetQuery("max-directory-buckets").Integer(*v.MaxDirectoryBuckets)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListMultipartUploads struct {
+}
+
+func (*awsRestxml_serializeOpListMultipartUploads) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListMultipartUploadsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?uploads")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Delimiter != nil {
+ encoder.SetQuery("delimiter").String(*v.Delimiter)
+ }
+
+ if len(v.EncodingType) > 0 {
+ encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.KeyMarker != nil {
+ encoder.SetQuery("key-marker").String(*v.KeyMarker)
+ }
+
+ if v.MaxUploads != nil {
+ encoder.SetQuery("max-uploads").Integer(*v.MaxUploads)
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.UploadIdMarker != nil {
+ encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListObjects struct {
+}
+
+func (*awsRestxml_serializeOpListObjects) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListObjectsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListObjectsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Delimiter != nil {
+ encoder.SetQuery("delimiter").String(*v.Delimiter)
+ }
+
+ if len(v.EncodingType) > 0 {
+ encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Marker != nil {
+ encoder.SetQuery("marker").String(*v.Marker)
+ }
+
+ if v.MaxKeys != nil {
+ encoder.SetQuery("max-keys").Integer(*v.MaxKeys)
+ }
+
+ if v.OptionalObjectAttributes != nil {
+ locationName := "X-Amz-Optional-Object-Attributes"
+ if len(v.OptionalObjectAttributes) == 0 {
+ encoder.AddHeader(locationName).String("")
+ }
+ for i := range v.OptionalObjectAttributes {
+ if len(v.OptionalObjectAttributes[i]) > 0 {
+ escaped := string(v.OptionalObjectAttributes[i])
+ if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 {
+ escaped = strconv.Quote(string(v.OptionalObjectAttributes[i]))
+ }
+
+ encoder.AddHeader(locationName).String(string(escaped))
+ }
+ }
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListObjectsV2 struct {
+}
+
+func (*awsRestxml_serializeOpListObjectsV2) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListObjectsV2Input)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?list-type=2")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListObjectsV2Input(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContinuationToken != nil {
+ encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+ }
+
+ if v.Delimiter != nil {
+ encoder.SetQuery("delimiter").String(*v.Delimiter)
+ }
+
+ if len(v.EncodingType) > 0 {
+ encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.FetchOwner != nil {
+ encoder.SetQuery("fetch-owner").Boolean(*v.FetchOwner)
+ }
+
+ if v.MaxKeys != nil {
+ encoder.SetQuery("max-keys").Integer(*v.MaxKeys)
+ }
+
+ if v.OptionalObjectAttributes != nil {
+ locationName := "X-Amz-Optional-Object-Attributes"
+ if len(v.OptionalObjectAttributes) == 0 {
+ encoder.AddHeader(locationName).String("")
+ }
+ for i := range v.OptionalObjectAttributes {
+ if len(v.OptionalObjectAttributes[i]) > 0 {
+ escaped := string(v.OptionalObjectAttributes[i])
+ if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 {
+ escaped = strconv.Quote(string(v.OptionalObjectAttributes[i]))
+ }
+
+ encoder.AddHeader(locationName).String(string(escaped))
+ }
+ }
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.StartAfter != nil {
+ encoder.SetQuery("start-after").String(*v.StartAfter)
+ }
+
+ return nil
+}
+
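+// Usage sketch (editorial comment; bucket/prefix are placeholders, client/ctx as
+// above): the ?list-type=2 binding above is what s3.NewListObjectsV2Paginator
+// drives, with ContinuationToken re-serialized on every page.
+//
+//	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
+//		Bucket: aws.String("example-bucket"),
+//		Prefix: aws.String("logs/"),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			break
+//		}
+//		for _, obj := range page.Contents {
+//			fmt.Println(aws.ToString(obj.Key))
+//		}
+//	}
+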
+type awsRestxml_serializeOpListObjectVersions struct {
+}
+
+func (*awsRestxml_serializeOpListObjectVersions) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListObjectVersionsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?versions")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Delimiter != nil {
+ encoder.SetQuery("delimiter").String(*v.Delimiter)
+ }
+
+ if len(v.EncodingType) > 0 {
+ encoder.SetQuery("encoding-type").String(string(v.EncodingType))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.KeyMarker != nil {
+ encoder.SetQuery("key-marker").String(*v.KeyMarker)
+ }
+
+ if v.MaxKeys != nil {
+ encoder.SetQuery("max-keys").Integer(*v.MaxKeys)
+ }
+
+ if v.OptionalObjectAttributes != nil {
+ locationName := "X-Amz-Optional-Object-Attributes"
+ if len(v.OptionalObjectAttributes) == 0 {
+ encoder.AddHeader(locationName).String("")
+ }
+ for i := range v.OptionalObjectAttributes {
+ if len(v.OptionalObjectAttributes[i]) > 0 {
+ escaped := string(v.OptionalObjectAttributes[i])
+ if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 {
+ escaped = strconv.Quote(string(v.OptionalObjectAttributes[i]))
+ }
+
+ encoder.AddHeader(locationName).String(string(escaped))
+ }
+ }
+ }
+
+ if v.Prefix != nil {
+ encoder.SetQuery("prefix").String(*v.Prefix)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionIdMarker != nil {
+ encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpListParts struct {
+}
+
+func (*awsRestxml_serializeOpListParts) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListPartsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=ListParts")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsListPartsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.MaxParts != nil {
+ encoder.SetQuery("max-parts").Integer(*v.MaxParts)
+ }
+
+ if v.PartNumberMarker != nil {
+ encoder.SetQuery("part-number-marker").String(*v.PartNumberMarker)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
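+// Usage sketch (editorial comment; all values are placeholders, client/ctx as
+// above): ListParts pairs the {Key+} URI label with the uploadId query parameter
+// serialized above; the UploadId would come from an earlier CreateMultipartUpload
+// call.
+//
+//	parts, err := client.ListParts(ctx, &s3.ListPartsInput{
+//		Bucket:   aws.String("example-bucket"),
+//		Key:      aws.String("example/key"),
+//		UploadId: aws.String("upload-id-from-CreateMultipartUpload"),
+//		MaxParts: aws.Int32(1000), // ?max-parts=1000
+//	})
+//	_, _ = parts, err
+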
+type awsRestxml_serializeOpPutBucketAccelerateConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?accelerate")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AccelerateConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccelerateConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAccelerateConfiguration(input.AccelerateConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
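+// Usage sketch (editorial comment; bucket name is a placeholder, client/ctx as
+// above): unlike the GET operations above, this PUT carries an XML payload; the
+// serializer wraps it in the 2006-03-01 namespace and defaults Content-Type to
+// application/xml when unset.
+//
+//	// assumed import: types from github.com/aws/aws-sdk-go-v2/service/s3/types
+//	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		AccelerateConfiguration: &types.AccelerateConfiguration{
+//			Status: types.BucketAccelerateStatusEnabled, // <Status>Enabled</Status>
+//		},
+//	})
+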
+type awsRestxml_serializeOpPutBucketAcl struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAcl) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?acl")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AccessControlPolicy != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlPolicy",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.GrantFullControl != nil {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWrite != nil {
+ locationName := "X-Amz-Grant-Write"
+ encoder.SetHeader(locationName).String(*v.GrantWrite)
+ }
+
+ if v.GrantWriteACP != nil {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketAnalyticsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?analytics")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AnalyticsConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AnalyticsConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAnalyticsConfiguration(input.AnalyticsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketCors struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketCors) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketCorsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?cors")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.CORSConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CORSConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentCORSConfiguration(input.CORSConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketEncryption struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketEncryption) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketEncryptionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?encryption")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.ServerSideEncryptionConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ServerSideEncryptionConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentServerSideEncryptionConfiguration(input.ServerSideEncryptionConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.IntelligentTieringConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IntelligentTieringConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentIntelligentTieringConfiguration(input.IntelligentTieringConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketInventoryConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?inventory")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.InventoryConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "InventoryConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentInventoryConfiguration(input.InventoryConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketLifecycleConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?lifecycle")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.LifecycleConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LifecycleConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentBucketLifecycleConfiguration(input.LifecycleConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if len(v.TransitionDefaultMinimumObjectSize) > 0 {
+ locationName := "X-Amz-Transition-Default-Minimum-Object-Size"
+ encoder.SetHeader(locationName).String(string(v.TransitionDefaultMinimumObjectSize))
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketLogging struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketLogging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketLoggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?logging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.BucketLoggingStatus != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketLoggingStatus",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentBucketLoggingStatus(input.BucketLoggingStatus, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketMetricsConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?metrics")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.MetricsConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MetricsConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentMetricsConfiguration(input.MetricsConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Id != nil {
+ encoder.SetQuery("id").String(*v.Id)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketNotificationConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?notification")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.NotificationConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NotificationConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentNotificationConfiguration(input.NotificationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.SkipDestinationValidation != nil {
+ locationName := "X-Amz-Skip-Destination-Validation"
+ encoder.SetHeader(locationName).Boolean(*v.SkipDestinationValidation)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketOwnershipControls struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketOwnershipControlsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?ownershipControls")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.OwnershipControls != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OwnershipControls",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentOwnershipControls(input.OwnershipControls, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketPolicy struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketPolicy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketPolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?policy")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("text/plain")
+ }
+
+ if input.Policy != nil {
+ payload := strings.NewReader(*input.Policy)
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ConfirmRemoveSelfBucketAccess != nil {
+ locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access"
+ encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess)
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketReplication struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketReplication) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketReplicationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?replication")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.ReplicationConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicationConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentReplicationConfiguration(input.ReplicationConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Token != nil {
+ locationName := "X-Amz-Bucket-Object-Lock-Token"
+ encoder.SetHeader(locationName).String(*v.Token)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketRequestPayment struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketRequestPaymentInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?requestPayment")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.RequestPaymentConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RequestPaymentConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentRequestPaymentConfiguration(input.RequestPaymentConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketTagging struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?tagging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.Tagging != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tagging",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketVersioning struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketVersioning) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketVersioningInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?versioning")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.VersioningConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "VersioningConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentVersioningConfiguration(input.VersioningConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.MFA != nil {
+ locationName := "X-Amz-Mfa"
+ encoder.SetHeader(locationName).String(*v.MFA)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutBucketWebsite struct {
+}
+
+func (*awsRestxml_serializeOpPutBucketWebsite) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutBucketWebsiteInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?website")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.WebsiteConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "WebsiteConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentWebsiteConfiguration(input.WebsiteConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutObject struct {
+}
+
+func (*awsRestxml_serializeOpPutObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=PutObject")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/octet-stream")
+ }
+
+ if input.Body != nil {
+ payload := input.Body
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if v.BucketKeyEnabled != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
+ }
+
+ if v.CacheControl != nil {
+ locationName := "Cache-Control"
+ encoder.SetHeader(locationName).String(*v.CacheControl)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ChecksumCRC32 != nil {
+ locationName := "X-Amz-Checksum-Crc32"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32)
+ }
+
+ if v.ChecksumCRC32C != nil {
+ locationName := "X-Amz-Checksum-Crc32c"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32C)
+ }
+
+ if v.ChecksumCRC64NVME != nil {
+ locationName := "X-Amz-Checksum-Crc64nvme"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME)
+ }
+
+ if v.ChecksumSHA1 != nil {
+ locationName := "X-Amz-Checksum-Sha1"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA1)
+ }
+
+ if v.ChecksumSHA256 != nil {
+ locationName := "X-Amz-Checksum-Sha256"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA256)
+ }
+
+ if v.ContentDisposition != nil {
+ locationName := "Content-Disposition"
+ encoder.SetHeader(locationName).String(*v.ContentDisposition)
+ }
+
+ if v.ContentEncoding != nil {
+ locationName := "Content-Encoding"
+ encoder.SetHeader(locationName).String(*v.ContentEncoding)
+ }
+
+ if v.ContentLanguage != nil {
+ locationName := "Content-Language"
+ encoder.SetHeader(locationName).String(*v.ContentLanguage)
+ }
+
+ if v.ContentLength != nil {
+ locationName := "Content-Length"
+ encoder.SetHeader(locationName).Long(*v.ContentLength)
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ContentType != nil {
+ locationName := "Content-Type"
+ encoder.SetHeader(locationName).String(*v.ContentType)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Expires != nil {
+ locationName := "Expires"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+ }
+
+ if v.GrantFullControl != nil {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWriteACP != nil {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.IfMatch != nil {
+ locationName := "If-Match"
+ encoder.SetHeader(locationName).String(*v.IfMatch)
+ }
+
+ if v.IfNoneMatch != nil {
+ locationName := "If-None-Match"
+ encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.Metadata != nil {
+ hv := encoder.Headers("X-Amz-Meta-")
+ for mapKey, mapVal := range v.Metadata {
+ hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+ }
+ }
+
+ if len(v.ObjectLockLegalHoldStatus) > 0 {
+ locationName := "X-Amz-Object-Lock-Legal-Hold"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+ }
+
+ if len(v.ObjectLockMode) > 0 {
+ locationName := "X-Amz-Object-Lock-Mode"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+ }
+
+ if v.ObjectLockRetainUntilDate != nil {
+ locationName := "X-Amz-Object-Lock-Retain-Until-Date"
+ encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.SSEKMSEncryptionContext != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Context"
+ encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
+ }
+
+ if v.SSEKMSKeyId != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ if len(v.StorageClass) > 0 {
+ locationName := "X-Amz-Storage-Class"
+ encoder.SetHeader(locationName).String(string(v.StorageClass))
+ }
+
+ if v.Tagging != nil {
+ locationName := "X-Amz-Tagging"
+ encoder.SetHeader(locationName).String(*v.Tagging)
+ }
+
+ if v.WebsiteRedirectLocation != nil {
+ locationName := "X-Amz-Website-Redirect-Location"
+ encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
+ }
+
+ if v.WriteOffsetBytes != nil {
+ locationName := "X-Amz-Write-Offset-Bytes"
+ encoder.SetHeader(locationName).Long(*v.WriteOffsetBytes)
+ }
+
+ return nil
+}
+
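For orientation, since this vendored serializer is generated code: the header bindings above are what the public PutObject surface compiles down to. A minimal caller sketch (bucket, key, and metadata values are placeholders, not part of this change) that exercises several of those bindings:

    package main

    import (
    	"context"
    	"log"
    	"strings"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    	"github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    func main() {
    	cfg, err := config.LoadDefaultConfig(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	client := s3.NewFromConfig(cfg)

    	// Each field maps onto a binding emitted above:
    	// ChecksumAlgorithm -> X-Amz-Sdk-Checksum-Algorithm header,
    	// StorageClass     -> X-Amz-Storage-Class header,
    	// Metadata entries -> X-Amz-Meta-* headers (keys canonicalized),
    	// Key              -> the greedy {Key+} URI label.
    	if _, err := client.PutObject(context.Background(), &s3.PutObjectInput{
    		Bucket:            aws.String("example-bucket"),
    		Key:               aws.String("path/to/object"),
    		Body:              strings.NewReader("example payload"),
    		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
    		StorageClass:      types.StorageClassStandard,
    		Metadata:          map[string]string{"owner": "stash"},
    	}); err != nil {
    		log.Fatal(err)
    	}
    }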
+type awsRestxml_serializeOpPutObjectAcl struct {
+}
+
+func (*awsRestxml_serializeOpPutObjectAcl) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutObjectAclInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutObjectAclInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.AccessControlPolicy != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlPolicy",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentAccessControlPolicy(input.AccessControlPolicy, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ACL) > 0 {
+ locationName := "X-Amz-Acl"
+ encoder.SetHeader(locationName).String(string(v.ACL))
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.GrantFullControl != nil {
+ locationName := "X-Amz-Grant-Full-Control"
+ encoder.SetHeader(locationName).String(*v.GrantFullControl)
+ }
+
+ if v.GrantRead != nil {
+ locationName := "X-Amz-Grant-Read"
+ encoder.SetHeader(locationName).String(*v.GrantRead)
+ }
+
+ if v.GrantReadACP != nil {
+ locationName := "X-Amz-Grant-Read-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantReadACP)
+ }
+
+ if v.GrantWrite != nil {
+ locationName := "X-Amz-Grant-Write"
+ encoder.SetHeader(locationName).String(*v.GrantWrite)
+ }
+
+ if v.GrantWriteACP != nil {
+ locationName := "X-Amz-Grant-Write-Acp"
+ encoder.SetHeader(locationName).String(*v.GrantWriteACP)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutObjectLegalHold struct {
+}
+
+func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutObjectLegalHoldInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.LegalHold != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LegalHold",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentObjectLockLegalHold(input.LegalHold, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutObjectLockConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutObjectLockConfigurationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?object-lock")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.ObjectLockConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectLockConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentObjectLockConfiguration(input.ObjectLockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.Token != nil {
+ locationName := "X-Amz-Bucket-Object-Lock-Token"
+ encoder.SetHeader(locationName).String(*v.Token)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutObjectRetention struct {
+}
+
+func (*awsRestxml_serializeOpPutObjectRetention) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutObjectRetentionInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.Retention != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Retention",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentObjectLockRetention(input.Retention, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.BypassGovernanceRetention != nil {
+ locationName := "X-Amz-Bypass-Governance-Retention"
+ encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutObjectTagging struct {
+}
+
+func (*awsRestxml_serializeOpPutObjectTagging) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutObjectTaggingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.Tagging != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tagging",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentTagging(input.Tagging, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpPutPublicAccessBlock struct {
+}
+
+func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutPublicAccessBlockInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.PublicAccessBlockConfiguration != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "PublicAccessBlockConfiguration",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentPublicAccessBlockConfiguration(input.PublicAccessBlockConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpRestoreObject struct {
+}
+
+func (*awsRestxml_serializeOpRestoreObject) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*RestoreObjectInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsRestoreObjectInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if input.RestoreRequest != nil {
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+ }
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ payloadRootAttr := []smithyxml.Attr{}
+ payloadRoot := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RestoreRequest",
+ },
+ Attr: payloadRootAttr,
+ }
+ payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeDocumentRestoreRequest(input.RestoreRequest, xmlEncoder.RootElement(payloadRoot)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ payload := bytes.NewReader(xmlEncoder.Bytes())
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.VersionId != nil {
+ encoder.SetQuery("versionId").String(*v.VersionId)
+ }
+
+ return nil
+}
+
+type awsRestxml_serializeOpSelectObjectContent struct {
+}
+
+func (*awsRestxml_serializeOpSelectObjectContent) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*SelectObjectContentInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ restEncoder.SetHeader("Content-Type").String("application/xml")
+
+ xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil))
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SelectObjectContentRequest",
+ },
+ Attr: rootAttr,
+ }
+ root.Attr = append(root.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/"))
+ if err := awsRestxml_serializeOpDocumentSelectObjectContentInput(input, xmlEncoder.RootElement(root)); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ if request, err = request.SetStream(bytes.NewReader(xmlEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ return nil
+}
+
+func awsRestxml_serializeOpDocumentSelectObjectContentInput(v *SelectObjectContentInput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Expression != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Expression",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Expression)
+ }
+ if len(v.ExpressionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExpressionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ExpressionType))
+ }
+ if v.InputSerialization != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "InputSerialization",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil {
+ return err
+ }
+ }
+ if v.OutputSerialization != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputSerialization",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil {
+ return err
+ }
+ }
+ if v.RequestProgress != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RequestProgress",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRequestProgress(v.RequestProgress, el); err != nil {
+ return err
+ }
+ }
+ if v.ScanRange != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ScanRange",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentScanRange(v.ScanRange, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
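Unlike the header-only bindings, SelectObjectContent always carries an XML body: the serializer above fixes the root element name and the 2006-03-01 namespace, then writes one member element per set input field. For a hypothetical SQL query (the expression text is illustrative), the payload comes out roughly as:

    <SelectObjectContentRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
      <Expression>SELECT s.* FROM S3Object s</Expression>
      <ExpressionType>SQL</ExpressionType>
      <InputSerialization>...</InputSerialization>
      <OutputSerialization>...</OutputSerialization>
    </SelectObjectContentRequest>

The nested InputSerialization/OutputSerialization contents are elided here; they are produced by the document serializers the function above delegates to.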
+type awsRestxml_serializeOpUploadPart struct {
+}
+
+func (*awsRestxml_serializeOpUploadPart) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UploadPartInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPart")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsUploadPartInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/octet-stream")
+ }
+
+ if input.Body != nil {
+ payload := input.Body
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if len(v.ChecksumAlgorithm) > 0 {
+ locationName := "X-Amz-Sdk-Checksum-Algorithm"
+ encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
+ }
+
+ if v.ChecksumCRC32 != nil {
+ locationName := "X-Amz-Checksum-Crc32"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32)
+ }
+
+ if v.ChecksumCRC32C != nil {
+ locationName := "X-Amz-Checksum-Crc32c"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32C)
+ }
+
+ if v.ChecksumCRC64NVME != nil {
+ locationName := "X-Amz-Checksum-Crc64nvme"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME)
+ }
+
+ if v.ChecksumSHA1 != nil {
+ locationName := "X-Amz-Checksum-Sha1"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA1)
+ }
+
+ if v.ChecksumSHA256 != nil {
+ locationName := "X-Amz-Checksum-Sha256"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA256)
+ }
+
+ if v.ContentLength != nil {
+ locationName := "Content-Length"
+ encoder.SetHeader(locationName).Long(*v.ContentLength)
+ }
+
+ if v.ContentMD5 != nil {
+ locationName := "Content-Md5"
+ encoder.SetHeader(locationName).String(*v.ContentMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.PartNumber != nil {
+ encoder.SetQuery("partNumber").Integer(*v.PartNumber)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
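Taken together, the UploadPart bindings put the object key in the path, the part coordinates in the query string, and stream the body as-is with a defaulted octet-stream content type. Assuming part number 1 and a placeholder upload ID, the serialized request starts out roughly as:

    PUT /path/to/object?x-id=UploadPart&partNumber=1&uploadId=EXAMPLE-UPLOAD-ID HTTP/1.1
    Content-Type: application/octet-stream
    Content-Length: 5242880
    X-Amz-Sdk-Checksum-Algorithm: CRC32

(Which optional headers appear, and the exact query ordering, depend on the input fields that are set.)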
+type awsRestxml_serializeOpUploadPartCopy struct {
+}
+
+func (*awsRestxml_serializeOpUploadPartCopy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UploadPartCopyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPartCopy")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "PUT"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.CopySource != nil {
+ locationName := "X-Amz-Copy-Source"
+ encoder.SetHeader(locationName).String(*v.CopySource)
+ }
+
+ if v.CopySourceIfMatch != nil {
+ locationName := "X-Amz-Copy-Source-If-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfMatch)
+ }
+
+ if v.CopySourceIfModifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Modified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince))
+ }
+
+ if v.CopySourceIfNoneMatch != nil {
+ locationName := "X-Amz-Copy-Source-If-None-Match"
+ encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch)
+ }
+
+ if v.CopySourceIfUnmodifiedSince != nil {
+ locationName := "X-Amz-Copy-Source-If-Unmodified-Since"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince))
+ }
+
+ if v.CopySourceRange != nil {
+ locationName := "X-Amz-Copy-Source-Range"
+ encoder.SetHeader(locationName).String(*v.CopySourceRange)
+ }
+
+ if v.CopySourceSSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm)
+ }
+
+ if v.CopySourceSSECustomerKey != nil {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey)
+ }
+
+ if v.CopySourceSSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5)
+ }
+
+ if v.ExpectedBucketOwner != nil {
+ locationName := "X-Amz-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+ }
+
+ if v.ExpectedSourceBucketOwner != nil {
+ locationName := "X-Amz-Source-Expected-Bucket-Owner"
+ encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner)
+ }
+
+ if v.Key == nil || len(*v.Key) == 0 {
+ return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
+ }
+ if v.Key != nil {
+ if err := encoder.SetURI("Key").String(*v.Key); err != nil {
+ return err
+ }
+ }
+
+ if v.PartNumber != nil {
+ encoder.SetQuery("partNumber").Integer(*v.PartNumber)
+ }
+
+ if len(v.RequestPayer) > 0 {
+ locationName := "X-Amz-Request-Payer"
+ encoder.SetHeader(locationName).String(string(v.RequestPayer))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKey != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKey)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.UploadId != nil {
+ encoder.SetQuery("uploadId").String(*v.UploadId)
+ }
+
+ return nil
+}
+
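UploadPartCopy is the one part-upload operation with no request body: the copy source and range travel entirely in headers, and the part coordinates in the query string. A sketch assuming an existing client and an in-flight multipart upload (all bucket/key/upload names are placeholders); the X-Amz-Copy-Source value takes the "source-bucket/source-key" form:

    package main

    import (
    	"context"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // copyPart copies the first 5 MiB of an existing object in as part 1
    // of a multipart upload the caller has already created.
    func copyPart(ctx context.Context, client *s3.Client, uploadID string) error {
    	_, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
    		Bucket:          aws.String("dest-bucket"),              // placeholder
    		Key:             aws.String("dest-key"),                 // placeholder
    		CopySource:      aws.String("source-bucket/source-key"), // -> X-Amz-Copy-Source
    		CopySourceRange: aws.String("bytes=0-5242879"),          // -> X-Amz-Copy-Source-Range
    		PartNumber:      aws.Int32(1),                           // -> ?partNumber=1
    		UploadId:        aws.String(uploadID),                   // -> ?uploadId=...
    	})
    	return err
    }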
+type awsRestxml_serializeOpWriteGetObjectResponse struct {
+}
+
+func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*WriteGetObjectResponseInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if !restEncoder.HasHeader("Content-Type") {
+ ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true)
+ restEncoder.SetHeader("Content-Type").String("application/octet-stream")
+ }
+
+ if input.Body != nil {
+ payload := input.Body
+ if request, err = request.SetStream(payload); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AcceptRanges != nil {
+ locationName := "X-Amz-Fwd-Header-Accept-Ranges"
+ encoder.SetHeader(locationName).String(*v.AcceptRanges)
+ }
+
+ if v.BucketKeyEnabled != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled"
+ encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
+ }
+
+ if v.CacheControl != nil {
+ locationName := "X-Amz-Fwd-Header-Cache-Control"
+ encoder.SetHeader(locationName).String(*v.CacheControl)
+ }
+
+ if v.ChecksumCRC32 != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32)
+ }
+
+ if v.ChecksumCRC32C != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC32C)
+ }
+
+ if v.ChecksumCRC64NVME != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc64nvme"
+ encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME)
+ }
+
+ if v.ChecksumSHA1 != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA1)
+ }
+
+ if v.ChecksumSHA256 != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256"
+ encoder.SetHeader(locationName).String(*v.ChecksumSHA256)
+ }
+
+ if v.ContentDisposition != nil {
+ locationName := "X-Amz-Fwd-Header-Content-Disposition"
+ encoder.SetHeader(locationName).String(*v.ContentDisposition)
+ }
+
+ if v.ContentEncoding != nil {
+ locationName := "X-Amz-Fwd-Header-Content-Encoding"
+ encoder.SetHeader(locationName).String(*v.ContentEncoding)
+ }
+
+ if v.ContentLanguage != nil {
+ locationName := "X-Amz-Fwd-Header-Content-Language"
+ encoder.SetHeader(locationName).String(*v.ContentLanguage)
+ }
+
+ if v.ContentLength != nil {
+ locationName := "Content-Length"
+ encoder.SetHeader(locationName).Long(*v.ContentLength)
+ }
+
+ if v.ContentRange != nil {
+ locationName := "X-Amz-Fwd-Header-Content-Range"
+ encoder.SetHeader(locationName).String(*v.ContentRange)
+ }
+
+ if v.ContentType != nil {
+ locationName := "X-Amz-Fwd-Header-Content-Type"
+ encoder.SetHeader(locationName).String(*v.ContentType)
+ }
+
+ if v.DeleteMarker != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker"
+ encoder.SetHeader(locationName).Boolean(*v.DeleteMarker)
+ }
+
+ if v.ErrorCode != nil {
+ locationName := "X-Amz-Fwd-Error-Code"
+ encoder.SetHeader(locationName).String(*v.ErrorCode)
+ }
+
+ if v.ErrorMessage != nil {
+ locationName := "X-Amz-Fwd-Error-Message"
+ encoder.SetHeader(locationName).String(*v.ErrorMessage)
+ }
+
+ if v.ETag != nil {
+ locationName := "X-Amz-Fwd-Header-Etag"
+ encoder.SetHeader(locationName).String(*v.ETag)
+ }
+
+ if v.Expiration != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Expiration"
+ encoder.SetHeader(locationName).String(*v.Expiration)
+ }
+
+ if v.Expires != nil {
+ locationName := "X-Amz-Fwd-Header-Expires"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
+ }
+
+ if v.LastModified != nil {
+ locationName := "X-Amz-Fwd-Header-Last-Modified"
+ encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.LastModified))
+ }
+
+ if v.Metadata != nil {
+ hv := encoder.Headers("X-Amz-Meta-")
+ for mapKey, mapVal := range v.Metadata {
+ hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
+ }
+ }
+
+ if v.MissingMeta != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta"
+ encoder.SetHeader(locationName).Integer(*v.MissingMeta)
+ }
+
+ if len(v.ObjectLockLegalHoldStatus) > 0 {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Legal-Hold"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockLegalHoldStatus))
+ }
+
+ if len(v.ObjectLockMode) > 0 {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Mode"
+ encoder.SetHeader(locationName).String(string(v.ObjectLockMode))
+ }
+
+ if v.ObjectLockRetainUntilDate != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Object-Lock-Retain-Until-Date"
+ encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate))
+ }
+
+ if v.PartsCount != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count"
+ encoder.SetHeader(locationName).Integer(*v.PartsCount)
+ }
+
+ if len(v.ReplicationStatus) > 0 {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Replication-Status"
+ encoder.SetHeader(locationName).String(string(v.ReplicationStatus))
+ }
+
+ if len(v.RequestCharged) > 0 {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Request-Charged"
+ encoder.SetHeader(locationName).String(string(v.RequestCharged))
+ }
+
+ if v.RequestRoute != nil {
+ locationName := "X-Amz-Request-Route"
+ encoder.SetHeader(locationName).String(*v.RequestRoute)
+ }
+
+ if v.RequestToken != nil {
+ locationName := "X-Amz-Request-Token"
+ encoder.SetHeader(locationName).String(*v.RequestToken)
+ }
+
+ if v.Restore != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Restore"
+ encoder.SetHeader(locationName).String(*v.Restore)
+ }
+
+ if len(v.ServerSideEncryption) > 0 {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption"
+ encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
+ }
+
+ if v.SSECustomerAlgorithm != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm"
+ encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
+ }
+
+ if v.SSECustomerKeyMD5 != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5"
+ encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
+ }
+
+ if v.SSEKMSKeyId != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
+ encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
+ }
+
+ if v.StatusCode != nil {
+ locationName := "X-Amz-Fwd-Status"
+ encoder.SetHeader(locationName).Integer(*v.StatusCode)
+ }
+
+ if len(v.StorageClass) > 0 {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Storage-Class"
+ encoder.SetHeader(locationName).String(string(v.StorageClass))
+ }
+
+ if v.TagCount != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count"
+ encoder.SetHeader(locationName).Integer(*v.TagCount)
+ }
+
+ if v.VersionId != nil {
+ locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id"
+ encoder.SetHeader(locationName).String(*v.VersionId)
+ }
+
+ return nil
+}
+
+func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DaysAfterInitiation != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DaysAfterInitiation",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.DaysAfterInitiation)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAccelerateConfiguration(v *types.AccelerateConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAccessControlPolicy(v *types.AccessControlPolicy, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Grants != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlList",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrants(v.Grants, el); err != nil {
+ return err
+ }
+ }
+ if v.Owner != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Owner",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOwner(v.Owner, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAccessControlTranslation(v *types.AccessControlTranslation, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Owner) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Owner",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Owner))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAllowedHeaders(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAllowedMethods(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAllowedOrigins(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsAndOperator(v *types.AnalyticsAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsConfiguration(v *types.AnalyticsConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAnalyticsFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.StorageClassAnalysis != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClassAnalysis",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentStorageClassAnalysis(v.StorageClassAnalysis, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsExportDestination(v *types.AnalyticsExportDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3BucketDestination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3BucketDestination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v.S3BucketDestination, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsFilter(v types.AnalyticsFilter, value smithyxml.Value) error {
+ defer value.Close()
+ switch uv := v.(type) {
+ case *types.AnalyticsFilterMemberAnd:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentAnalyticsAndOperator(&uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.AnalyticsFilterMemberPrefix:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.AnalyticsFilterMemberTag:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Bucket)
+ }
+ if v.BucketAccountId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketAccountId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.BucketAccountId)
+ }
+ if len(v.Format) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Format",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Format))
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentBucketInfo(v *types.BucketInfo, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.DataRedundancy) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DataRedundancy",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.DataRedundancy))
+ }
+ if len(v.Type) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Type",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Type))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentBucketLoggingStatus(v *types.BucketLoggingStatus, value smithyxml.Value) error {
+ defer value.Close()
+ if v.LoggingEnabled != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LoggingEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLoggingEnabled(v.LoggingEnabled, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCompletedMultipartUpload(v *types.CompletedMultipartUpload, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Parts != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Part",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentCompletedPartList(v.Parts, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ChecksumCRC32 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ChecksumCRC32",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ChecksumCRC32)
+ }
+ if v.ChecksumCRC32C != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ChecksumCRC32C",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ChecksumCRC32C)
+ }
+ if v.ChecksumCRC64NVME != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ChecksumCRC64NVME",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ChecksumCRC64NVME)
+ }
+ if v.ChecksumSHA1 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ChecksumSHA1",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ChecksumSHA1)
+ }
+ if v.ChecksumSHA256 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ChecksumSHA256",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ChecksumSHA256)
+ }
+ if v.ETag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ETag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ETag)
+ }
+ if v.PartNumber != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "PartNumber",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.PartNumber)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCompletedPartList(v []types.CompletedPart, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentCompletedPart(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCondition(v *types.Condition, value smithyxml.Value) error {
+ defer value.Close()
+ if v.HttpErrorCodeReturnedEquals != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HttpErrorCodeReturnedEquals",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HttpErrorCodeReturnedEquals)
+ }
+ if v.KeyPrefixEquals != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KeyPrefixEquals",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KeyPrefixEquals)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCORSConfiguration(v *types.CORSConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.CORSRules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CORSRule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentCORSRules(v.CORSRules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AllowedHeaders != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowedHeader",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentAllowedHeaders(v.AllowedHeaders, el); err != nil {
+ return err
+ }
+ }
+ if v.AllowedMethods != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowedMethod",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentAllowedMethods(v.AllowedMethods, el); err != nil {
+ return err
+ }
+ }
+ if v.AllowedOrigins != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowedOrigin",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentAllowedOrigins(v.AllowedOrigins, el); err != nil {
+ return err
+ }
+ }
+ if v.ExposeHeaders != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExposeHeader",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentExposeHeaders(v.ExposeHeaders, el); err != nil {
+ return err
+ }
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.MaxAgeSeconds != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MaxAgeSeconds",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.MaxAgeSeconds)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentCORSRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentBucketInfo(v.Bucket, el); err != nil {
+ return err
+ }
+ }
+ if v.Location != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Location",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLocationInfo(v.Location, el); err != nil {
+ return err
+ }
+ }
+ if len(v.LocationConstraint) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LocationConstraint",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.LocationConstraint))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AllowQuotedRecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AllowQuotedRecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.AllowQuotedRecordDelimiter)
+ }
+ if v.Comments != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Comments",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Comments)
+ }
+ if v.FieldDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FieldDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.FieldDelimiter)
+ }
+ if len(v.FileHeaderInfo) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FileHeaderInfo",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.FileHeaderInfo))
+ }
+ if v.QuoteCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteCharacter)
+ }
+ if v.QuoteEscapeCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteEscapeCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteEscapeCharacter)
+ }
+ if v.RecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.RecordDelimiter)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.FieldDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FieldDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.FieldDelimiter)
+ }
+ if v.QuoteCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteCharacter)
+ }
+ if v.QuoteEscapeCharacter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteEscapeCharacter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QuoteEscapeCharacter)
+ }
+ if len(v.QuoteFields) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QuoteFields",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.QuoteFields))
+ }
+ if v.RecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.RecordDelimiter)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Days != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Days)
+ }
+ if len(v.Mode) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Mode",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Mode))
+ }
+ if v.Years != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Years",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Years)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Objects != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Object",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentObjectIdentifierList(v.Objects, el); err != nil {
+ return err
+ }
+ }
+ if v.Quiet != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Quiet",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.Quiet)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDeleteMarkerReplication(v *types.DeleteMarkerReplication, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentDestination(v *types.Destination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccessControlTranslation != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlTranslation",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAccessControlTranslation(v.AccessControlTranslation, el); err != nil {
+ return err
+ }
+ }
+ if v.Account != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Account",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Account)
+ }
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Bucket)
+ }
+ if v.EncryptionConfiguration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EncryptionConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentEncryptionConfiguration(v.EncryptionConfiguration, el); err != nil {
+ return err
+ }
+ }
+ if v.Metrics != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Metrics",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentMetrics(v.Metrics, el); err != nil {
+ return err
+ }
+ }
+ if v.ReplicationTime != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicationTime",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationTime(v.ReplicationTime, el); err != nil {
+ return err
+ }
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentEncryption(v *types.Encryption, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.EncryptionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EncryptionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.EncryptionType))
+ }
+ if v.KMSContext != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KMSContext",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KMSContext)
+ }
+ if v.KMSKeyId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KMSKeyId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KMSKeyId)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentEncryptionConfiguration(v *types.EncryptionConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ReplicaKmsKeyID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicaKmsKeyID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ReplicaKmsKeyID)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentErrorDocument(v *types.ErrorDocument, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Key)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentEventBridgeConfiguration(v *types.EventBridgeConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ return nil
+}
+
+func awsRestxml_serializeDocumentEventList(v []types.Event, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(string(v[i]))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentExistingObjectReplication(v *types.ExistingObjectReplication, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentExposeHeaders(v []string, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ am.String(v[i])
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentFilterRule(v *types.FilterRule, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Name) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Name",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Name))
+ }
+ if v.Value != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Value",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Value)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentFilterRuleList(v []types.FilterRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentFilterRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGlacierJobParameters(v *types.GlacierJobParameters, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Tier) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tier",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Tier))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGrant(v *types.Grant, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Grantee != nil {
+ rootAttr := []smithyxml.Attr{}
+ rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance"))
+ if len(v.Grantee.Type) > 0 {
+ var av string
+ av = string(v.Grantee.Type)
+ rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av))
+ }
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grantee",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Permission) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Permission",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Permission))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGrantee(v *types.Grantee, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DisplayName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DisplayName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.DisplayName)
+ }
+ if v.EmailAddress != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EmailAddress",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.EmailAddress)
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.URI != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "URI",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.URI)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentGrants(v []types.Grant, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grant",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentGrant(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIndexDocument(v *types.IndexDocument, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Suffix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Suffix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Suffix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInputSerialization(v *types.InputSerialization, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.CompressionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CompressionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.CompressionType))
+ }
+ if v.CSV != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CSV",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentCSVInput(v.CSV, el); err != nil {
+ return err
+ }
+ }
+ if v.JSON != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "JSON",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentJSONInput(v.JSON, el); err != nil {
+ return err
+ }
+ }
+ if v.Parquet != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Parquet",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentParquetInput(v.Parquet, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentIntelligentTieringFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ if v.Tierings != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tiering",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTieringList(v.Tierings, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentIntelligentTieringFilter(v *types.IntelligentTieringFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.And != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentIntelligentTieringAndOperator(v.And, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Destination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Destination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryDestination(v.Destination, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if len(v.IncludedObjectVersions) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IncludedObjectVersions",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.IncludedObjectVersions))
+ }
+ if v.IsEnabled != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IsEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.IsEnabled)
+ }
+ if v.OptionalFields != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OptionalFields",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryOptionalFields(v.OptionalFields, el); err != nil {
+ return err
+ }
+ }
+ if v.Schedule != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Schedule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventorySchedule(v.Schedule, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryDestination(v *types.InventoryDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3BucketDestination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3BucketDestination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryS3BucketDestination(v.S3BucketDestination, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryEncryption(v *types.InventoryEncryption, value smithyxml.Value) error {
+ defer value.Close()
+ if v.SSEKMS != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SSE-KMS",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSSEKMS(v.SSEKMS, el); err != nil {
+ return err
+ }
+ }
+ if v.SSES3 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SSE-S3",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSSES3(v.SSES3, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryFilter(v *types.InventoryFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryOptionalFields(v []types.InventoryOptionalField, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Field",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ am.String(string(v[i]))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventoryS3BucketDestination(v *types.InventoryS3BucketDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccountId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccountId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.AccountId)
+ }
+ if v.Bucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Bucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Bucket)
+ }
+ if v.Encryption != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Encryption",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInventoryEncryption(v.Encryption, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Format) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Format",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Format))
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentInventorySchedule(v *types.InventorySchedule, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Frequency) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Frequency",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Frequency))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentJSONInput(v *types.JSONInput, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Type) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Type",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Type))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentJSONOutput(v *types.JSONOutput, value smithyxml.Value) error {
+ defer value.Close()
+ if v.RecordDelimiter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RecordDelimiter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.RecordDelimiter)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Events != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Event",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.LambdaFunctionArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CloudFunction",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.LambdaFunctionArn)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentLambdaFunctionConfiguration(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Date != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Date",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatDateTime(*v.Date))
+ }
+ if v.Days != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Days)
+ }
+ if v.ExpiredObjectDeleteMarker != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExpiredObjectDeleteMarker",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.ExpiredObjectDeleteMarker)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AbortIncompleteMultipartUpload != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AbortIncompleteMultipartUpload",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v.AbortIncompleteMultipartUpload, el); err != nil {
+ return err
+ }
+ }
+ if v.Expiration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Expiration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleExpiration(v.Expiration, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleRuleFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.NoncurrentVersionExpiration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentVersionExpiration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNoncurrentVersionExpiration(v.NoncurrentVersionExpiration, el); err != nil {
+ return err
+ }
+ }
+ if v.NoncurrentVersionTransitions != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentVersionTransition",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v.NoncurrentVersionTransitions, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ if v.Transitions != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Transition",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTransitionList(v.Transitions, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ObjectSizeGreaterThan != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectSizeGreaterThan",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.ObjectSizeGreaterThan)
+ }
+ if v.ObjectSizeLessThan != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectSizeLessThan",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.ObjectSizeLessThan)
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.And != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(v.And, el); err != nil {
+ return err
+ }
+ }
+ if v.ObjectSizeGreaterThan != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectSizeGreaterThan",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.ObjectSizeGreaterThan)
+ }
+ if v.ObjectSizeLessThan != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectSizeLessThan",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.ObjectSizeLessThan)
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentLifecycleRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLocationInfo(v *types.LocationInfo, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Name != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Name",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Name)
+ }
+ if len(v.Type) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Type",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Type))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error {
+ defer value.Close()
+ if v.TargetBucket != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetBucket",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TargetBucket)
+ }
+ if v.TargetGrants != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetGrants",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTargetGrants(v.TargetGrants, el); err != nil {
+ return err
+ }
+ }
+ if v.TargetObjectKeyFormat != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetObjectKeyFormat",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTargetObjectKeyFormat(v.TargetObjectKeyFormat, el); err != nil {
+ return err
+ }
+ }
+ if v.TargetPrefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TargetPrefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TargetPrefix)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Name != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Name",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Name)
+ }
+ if v.Value != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Value",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Value)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetadataTableConfiguration(v *types.MetadataTableConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3TablesDestination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3TablesDestination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentS3TablesDestination(v.S3TablesDestination, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetrics(v *types.Metrics, value smithyxml.Value) error {
+ defer value.Close()
+ if v.EventThreshold != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EventThreshold",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationTimeValue(v.EventThreshold, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetricsAndOperator(v *types.MetricsAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccessPointArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessPointArn",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.AccessPointArn)
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetricsConfiguration(v *types.MetricsConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentMetricsFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smithyxml.Value) error {
+ defer value.Close()
+ switch uv := v.(type) {
+ case *types.MetricsFilterMemberAccessPointArn:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessPointArn",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.MetricsFilterMemberAnd:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentMetricsAndOperator(&uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.MetricsFilterMemberPrefix:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ av.String(uv.Value)
+
+ case *types.MetricsFilterMemberTag:
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ av := value.MemberElement(customMemberName)
+ if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.NewerNoncurrentVersions != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NewerNoncurrentVersions",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.NewerNoncurrentVersions)
+ }
+ if v.NoncurrentDays != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentDays",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.NoncurrentDays)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error {
+ defer value.Close()
+ if v.NewerNoncurrentVersions != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NewerNoncurrentVersions",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.NewerNoncurrentVersions)
+ }
+ if v.NoncurrentDays != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "NoncurrentDays",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.NoncurrentDays)
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNoncurrentVersionTransitionList(v []types.NoncurrentVersionTransition, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentNoncurrentVersionTransition(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNotificationConfiguration(v *types.NotificationConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.EventBridgeConfiguration != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "EventBridgeConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentEventBridgeConfiguration(v.EventBridgeConfiguration, el); err != nil {
+ return err
+ }
+ }
+ if v.LambdaFunctionConfigurations != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CloudFunctionConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations, el); err != nil {
+ return err
+ }
+ }
+ if v.QueueConfigurations != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "QueueConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentQueueConfigurationList(v.QueueConfigurations, el); err != nil {
+ return err
+ }
+ }
+ if v.TopicConfigurations != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TopicConfiguration",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTopicConfigurationList(v.TopicConfigurations, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.NotificationConfigurationFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentS3KeyFilter(v.Key, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ETag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ETag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ETag)
+ }
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Key)
+ }
+ if v.LastModifiedTime != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "LastModifiedTime",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatHTTPDate(*v.LastModifiedTime))
+ }
+ if v.Size != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Size",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.Size)
+ }
+ if v.VersionId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "VersionId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.VersionId)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectIdentifierList(v []types.ObjectIdentifier, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentObjectIdentifier(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockConfiguration(v *types.ObjectLockConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.ObjectLockEnabled) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectLockEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ObjectLockEnabled))
+ }
+ if v.Rule != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentObjectLockRule(v.Rule, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockLegalHold(v *types.ObjectLockLegalHold, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockRetention(v *types.ObjectLockRetention, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Mode) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Mode",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Mode))
+ }
+ if v.RetainUntilDate != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RetainUntilDate",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatDateTime(*v.RetainUntilDate))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentObjectLockRule(v *types.ObjectLockRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DefaultRetention != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DefaultRetention",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentDefaultRetention(v.DefaultRetention, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOutputLocation(v *types.OutputLocation, value smithyxml.Value) error {
+ defer value.Close()
+ if v.S3 != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "S3",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentS3Location(v.S3, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOutputSerialization(v *types.OutputSerialization, value smithyxml.Value) error {
+ defer value.Close()
+ if v.CSV != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CSV",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentCSVOutput(v.CSV, el); err != nil {
+ return err
+ }
+ }
+ if v.JSON != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "JSON",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentJSONOutput(v.JSON, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwner(v *types.Owner, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DisplayName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DisplayName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.DisplayName)
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControls(v *types.OwnershipControls, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentOwnershipControlsRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControlsRule(v *types.OwnershipControlsRule, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.ObjectOwnership) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ObjectOwnership",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ObjectOwnership))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentOwnershipControlsRules(v []types.OwnershipControlsRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentOwnershipControlsRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smithyxml.Value) error {
+ defer value.Close()
+ return nil
+}
+
+func awsRestxml_serializeDocumentPartitionedPrefix(v *types.PartitionedPrefix, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.PartitionDateSource) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "PartitionDateSource",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.PartitionDateSource))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.BlockPublicAcls != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BlockPublicAcls",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.BlockPublicAcls)
+ }
+ if v.BlockPublicPolicy != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BlockPublicPolicy",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.BlockPublicPolicy)
+ }
+ if v.IgnorePublicAcls != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IgnorePublicAcls",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.IgnorePublicAcls)
+ }
+ if v.RestrictPublicBuckets != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RestrictPublicBuckets",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.RestrictPublicBuckets)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentQueueConfiguration(v *types.QueueConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Events != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Event",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.QueueArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Queue",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.QueueArn)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentQueueConfigurationList(v []types.QueueConfiguration, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentQueueConfiguration(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRedirect(v *types.Redirect, value smithyxml.Value) error {
+ defer value.Close()
+ if v.HostName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HostName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HostName)
+ }
+ if v.HttpRedirectCode != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HttpRedirectCode",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HttpRedirectCode)
+ }
+ if len(v.Protocol) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Protocol",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Protocol))
+ }
+ if v.ReplaceKeyPrefixWith != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplaceKeyPrefixWith",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ReplaceKeyPrefixWith)
+ }
+ if v.ReplaceKeyWith != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplaceKeyWith",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ReplaceKeyWith)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRedirectAllRequestsTo(v *types.RedirectAllRequestsTo, value smithyxml.Value) error {
+ defer value.Close()
+ if v.HostName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "HostName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.HostName)
+ }
+ if len(v.Protocol) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Protocol",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Protocol))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicaModifications(v *types.ReplicaModifications, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationConfiguration(v *types.ReplicationConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Role != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Role",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Role)
+ }
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentReplicationRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DeleteMarkerReplication != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DeleteMarkerReplication",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentDeleteMarkerReplication(v.DeleteMarkerReplication, el); err != nil {
+ return err
+ }
+ }
+ if v.Destination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Destination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentDestination(v.Destination, el); err != nil {
+ return err
+ }
+ }
+ if v.ExistingObjectReplication != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExistingObjectReplication",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentExistingObjectReplication(v.ExistingObjectReplication, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationRuleFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.ID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.ID)
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Priority != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Priority",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Priority)
+ }
+ if v.SourceSelectionCriteria != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SourceSelectionCriteria",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSourceSelectionCriteria(v.SourceSelectionCriteria, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tags != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.Tags, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.And != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "And",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(v.And, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if v.Tag != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationRules(v []types.ReplicationRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentReplicationRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ if v.Time != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Time",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicationTimeValue(v.Time, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Minutes != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Minutes",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Minutes)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPaymentConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Payer) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Payer",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Payer))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Enabled != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Enabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.Enabled)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Days != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Days)
+ }
+ if v.Description != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Description",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Description)
+ }
+ if v.GlacierJobParameters != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "GlacierJobParameters",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGlacierJobParameters(v.GlacierJobParameters, el); err != nil {
+ return err
+ }
+ }
+ if v.OutputLocation != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputLocation",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOutputLocation(v.OutputLocation, el); err != nil {
+ return err
+ }
+ }
+ if v.SelectParameters != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SelectParameters",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSelectParameters(v.SelectParameters, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Tier) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tier",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Tier))
+ }
+ if len(v.Type) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Type",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Type))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRoutingRule(v *types.RoutingRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Condition != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Condition",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentCondition(v.Condition, el); err != nil {
+ return err
+ }
+ }
+ if v.Redirect != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Redirect",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRedirect(v.Redirect, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentRoutingRules(v []types.RoutingRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RoutingRule",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentRoutingRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentS3KeyFilter(v *types.S3KeyFilter, value smithyxml.Value) error {
+ defer value.Close()
+ if v.FilterRules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "FilterRule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentFilterRuleList(v.FilterRules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml.Value) error {
+ defer value.Close()
+ if v.AccessControlList != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessControlList",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrants(v.AccessControlList, el); err != nil {
+ return err
+ }
+ }
+ if v.BucketName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.BucketName)
+ }
+ if len(v.CannedACL) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "CannedACL",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.CannedACL))
+ }
+ if v.Encryption != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Encryption",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentEncryption(v.Encryption, el); err != nil {
+ return err
+ }
+ }
+ if v.Prefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Prefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Prefix)
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ if v.Tagging != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tagging",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTagging(v.Tagging, el); err != nil {
+ return err
+ }
+ }
+ if v.UserMetadata != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "UserMetadata",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentUserMetadata(v.UserMetadata, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentS3TablesDestination(v *types.S3TablesDestination, value smithyxml.Value) error {
+ defer value.Close()
+ if v.TableBucketArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TableBucketArn",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TableBucketArn)
+ }
+ if v.TableName != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TableName",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TableName)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error {
+ defer value.Close()
+ if v.End != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "End",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.End)
+ }
+ if v.Start != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Start",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Long(*v.Start)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSelectParameters(v *types.SelectParameters, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Expression != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Expression",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Expression)
+ }
+ if len(v.ExpressionType) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ExpressionType",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.ExpressionType))
+ }
+ if v.InputSerialization != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "InputSerialization",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentInputSerialization(v.InputSerialization, el); err != nil {
+ return err
+ }
+ }
+ if v.OutputSerialization != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputSerialization",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentOutputSerialization(v.OutputSerialization, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault, value smithyxml.Value) error {
+ defer value.Close()
+ if v.KMSMasterKeyID != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KMSMasterKeyID",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KMSMasterKeyID)
+ }
+ if len(v.SSEAlgorithm) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SSEAlgorithm",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.SSEAlgorithm))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Rules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Rule",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentServerSideEncryptionRules(v.Rules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEncryptionRule, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ApplyServerSideEncryptionByDefault != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ApplyServerSideEncryptionByDefault",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault, el); err != nil {
+ return err
+ }
+ }
+ if v.BucketKeyEnabled != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "BucketKeyEnabled",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Boolean(*v.BucketKeyEnabled)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideEncryptionRule, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentServerSideEncryptionRule(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSimplePrefix(v *types.SimplePrefix, value smithyxml.Value) error {
+ defer value.Close()
+ return nil
+}
+
+func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ReplicaModifications != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ReplicaModifications",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentReplicaModifications(v.ReplicaModifications, el); err != nil {
+ return err
+ }
+ }
+ if v.SseKmsEncryptedObjects != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SseKmsEncryptedObjects",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSseKmsEncryptedObjects(v.SseKmsEncryptedObjects, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSSEKMS(v *types.SSEKMS, value smithyxml.Value) error {
+ defer value.Close()
+ if v.KeyId != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "KeyId",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.KeyId)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentSSES3(v *types.SSES3, value smithyxml.Value) error {
+ defer value.Close()
+ return nil
+}
+
+func awsRestxml_serializeDocumentStorageClassAnalysis(v *types.StorageClassAnalysis, value smithyxml.Value) error {
+ defer value.Close()
+ if v.DataExport != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "DataExport",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v.DataExport, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Destination != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Destination",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentAnalyticsExportDestination(v.Destination, el); err != nil {
+ return err
+ }
+ }
+ if len(v.OutputSchemaVersion) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "OutputSchemaVersion",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.OutputSchemaVersion))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTag(v *types.Tag, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Key != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Key",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Key)
+ }
+ if v.Value != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Value",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Value)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTagging(v *types.Tagging, value smithyxml.Value) error {
+ defer value.Close()
+ if v.TagSet != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "TagSet",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentTagSet(v.TagSet, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTagSet(v []types.Tag, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Tag",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTag(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTargetGrant(v *types.TargetGrant, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Grantee != nil {
+ rootAttr := []smithyxml.Attr{}
+ rootAttr = append(rootAttr, smithyxml.NewNamespaceAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance"))
+ if len(v.Grantee.Type) > 0 {
+ var av string
+ av = string(v.Grantee.Type)
+ rootAttr = append(rootAttr, smithyxml.NewAttribute("xsi:type", av))
+ }
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grantee",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentGrantee(v.Grantee, el); err != nil {
+ return err
+ }
+ }
+ if len(v.Permission) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Permission",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Permission))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Grant",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTargetGrant(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTargetObjectKeyFormat(v *types.TargetObjectKeyFormat, value smithyxml.Value) error {
+ defer value.Close()
+ if v.PartitionedPrefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "PartitionedPrefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentPartitionedPrefix(v.PartitionedPrefix, el); err != nil {
+ return err
+ }
+ }
+ if v.SimplePrefix != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "SimplePrefix",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentSimplePrefix(v.SimplePrefix, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.AccessTier) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "AccessTier",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.AccessTier))
+ }
+ if v.Days != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Days)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTieringList(v []types.Tiering, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTiering(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTopicConfiguration(v *types.TopicConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Events != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Event",
+ },
+ Attr: rootAttr,
+ }
+ el := value.FlattenedElement(root)
+ if err := awsRestxml_serializeDocumentEventList(v.Events, el); err != nil {
+ return err
+ }
+ }
+ if v.Filter != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Filter",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentNotificationConfigurationFilter(v.Filter, el); err != nil {
+ return err
+ }
+ }
+ if v.Id != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Id",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.Id)
+ }
+ if v.TopicArn != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Topic",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(*v.TopicArn)
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTopicConfigurationList(v []types.TopicConfiguration, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTopicConfiguration(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml.Value) error {
+ defer value.Close()
+ if v.Date != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Date",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(smithytime.FormatDateTime(*v.Date))
+ }
+ if v.Days != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Days",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.Integer(*v.Days)
+ }
+ if len(v.StorageClass) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "StorageClass",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.StorageClass))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentTransitionList(v []types.Transition, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ array = value.Array()
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentTransition(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentUserMetadata(v []types.MetadataEntry, value smithyxml.Value) error {
+ var array *smithyxml.Array
+ if !value.IsFlattened() {
+ defer value.Close()
+ }
+ customMemberNameAttr := []smithyxml.Attr{}
+ customMemberName := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MetadataEntry",
+ },
+ Attr: customMemberNameAttr,
+ }
+ array = value.ArrayWithCustomName(customMemberName)
+ for i := range v {
+ am := array.Member()
+ if err := awsRestxml_serializeDocumentMetadataEntry(&v[i], am); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentVersioningConfiguration(v *types.VersioningConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if len(v.MFADelete) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "MfaDelete",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.MFADelete))
+ }
+ if len(v.Status) > 0 {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "Status",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ el.String(string(v.Status))
+ }
+ return nil
+}
+
+func awsRestxml_serializeDocumentWebsiteConfiguration(v *types.WebsiteConfiguration, value smithyxml.Value) error {
+ defer value.Close()
+ if v.ErrorDocument != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "ErrorDocument",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentErrorDocument(v.ErrorDocument, el); err != nil {
+ return err
+ }
+ }
+ if v.IndexDocument != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "IndexDocument",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentIndexDocument(v.IndexDocument, el); err != nil {
+ return err
+ }
+ }
+ if v.RedirectAllRequestsTo != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RedirectAllRequestsTo",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRedirectAllRequestsTo(v.RedirectAllRequestsTo, el); err != nil {
+ return err
+ }
+ }
+ if v.RoutingRules != nil {
+ rootAttr := []smithyxml.Attr{}
+ root := smithyxml.StartElement{
+ Name: smithyxml.Name{
+ Local: "RoutingRules",
+ },
+ Attr: rootAttr,
+ }
+ el := value.MemberElement(root)
+ if err := awsRestxml_serializeDocumentRoutingRules(v.RoutingRules, el); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
new file mode 100644
index 000000000..ac5ab0bef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go
@@ -0,0 +1,1474 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+type AnalyticsS3ExportFileFormat string
+
+// Enum values for AnalyticsS3ExportFileFormat
+const (
+ AnalyticsS3ExportFileFormatCsv AnalyticsS3ExportFileFormat = "CSV"
+)
+
+// Values returns all known values for AnalyticsS3ExportFileFormat. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat {
+ return []AnalyticsS3ExportFileFormat{
+ "CSV",
+ }
+}
+
+type ArchiveStatus string
+
+// Enum values for ArchiveStatus
+const (
+ ArchiveStatusArchiveAccess ArchiveStatus = "ARCHIVE_ACCESS"
+ ArchiveStatusDeepArchiveAccess ArchiveStatus = "DEEP_ARCHIVE_ACCESS"
+)
+
+// Values returns all known values for ArchiveStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ArchiveStatus) Values() []ArchiveStatus {
+ return []ArchiveStatus{
+ "ARCHIVE_ACCESS",
+ "DEEP_ARCHIVE_ACCESS",
+ }
+}
+
+type BucketAccelerateStatus string
+
+// Enum values for BucketAccelerateStatus
+const (
+ BucketAccelerateStatusEnabled BucketAccelerateStatus = "Enabled"
+ BucketAccelerateStatusSuspended BucketAccelerateStatus = "Suspended"
+)
+
+// Values returns all known values for BucketAccelerateStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketAccelerateStatus) Values() []BucketAccelerateStatus {
+ return []BucketAccelerateStatus{
+ "Enabled",
+ "Suspended",
+ }
+}
+
+type BucketCannedACL string
+
+// Enum values for BucketCannedACL
+const (
+ BucketCannedACLPrivate BucketCannedACL = "private"
+ BucketCannedACLPublicRead BucketCannedACL = "public-read"
+ BucketCannedACLPublicReadWrite BucketCannedACL = "public-read-write"
+ BucketCannedACLAuthenticatedRead BucketCannedACL = "authenticated-read"
+)
+
+// Values returns all known values for BucketCannedACL. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketCannedACL) Values() []BucketCannedACL {
+ return []BucketCannedACL{
+ "private",
+ "public-read",
+ "public-read-write",
+ "authenticated-read",
+ }
+}
+
+type BucketLocationConstraint string
+
+// Enum values for BucketLocationConstraint
+const (
+ BucketLocationConstraintAfSouth1 BucketLocationConstraint = "af-south-1"
+ BucketLocationConstraintApEast1 BucketLocationConstraint = "ap-east-1"
+ BucketLocationConstraintApNortheast1 BucketLocationConstraint = "ap-northeast-1"
+ BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2"
+ BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3"
+ BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1"
+ BucketLocationConstraintApSouth2 BucketLocationConstraint = "ap-south-2"
+ BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1"
+ BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2"
+ BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3"
+ BucketLocationConstraintApSoutheast4 BucketLocationConstraint = "ap-southeast-4"
+ BucketLocationConstraintApSoutheast5 BucketLocationConstraint = "ap-southeast-5"
+ BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1"
+ BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1"
+ BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1"
+ BucketLocationConstraintEu BucketLocationConstraint = "EU"
+ BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1"
+ BucketLocationConstraintEuCentral2 BucketLocationConstraint = "eu-central-2"
+ BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1"
+ BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1"
+ BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2"
+ BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1"
+ BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2"
+ BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3"
+ BucketLocationConstraintIlCentral1 BucketLocationConstraint = "il-central-1"
+ BucketLocationConstraintMeCentral1 BucketLocationConstraint = "me-central-1"
+ BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1"
+ BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1"
+ BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2"
+ BucketLocationConstraintUsGovEast1 BucketLocationConstraint = "us-gov-east-1"
+ BucketLocationConstraintUsGovWest1 BucketLocationConstraint = "us-gov-west-1"
+ BucketLocationConstraintUsWest1 BucketLocationConstraint = "us-west-1"
+ BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2"
+)
+
+// Values returns all known values for BucketLocationConstraint. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketLocationConstraint) Values() []BucketLocationConstraint {
+ return []BucketLocationConstraint{
+ "af-south-1",
+ "ap-east-1",
+ "ap-northeast-1",
+ "ap-northeast-2",
+ "ap-northeast-3",
+ "ap-south-1",
+ "ap-south-2",
+ "ap-southeast-1",
+ "ap-southeast-2",
+ "ap-southeast-3",
+ "ap-southeast-4",
+ "ap-southeast-5",
+ "ca-central-1",
+ "cn-north-1",
+ "cn-northwest-1",
+ "EU",
+ "eu-central-1",
+ "eu-central-2",
+ "eu-north-1",
+ "eu-south-1",
+ "eu-south-2",
+ "eu-west-1",
+ "eu-west-2",
+ "eu-west-3",
+ "il-central-1",
+ "me-central-1",
+ "me-south-1",
+ "sa-east-1",
+ "us-east-2",
+ "us-gov-east-1",
+ "us-gov-west-1",
+ "us-west-1",
+ "us-west-2",
+ }
+}
+
+type BucketLogsPermission string
+
+// Enum values for BucketLogsPermission
+const (
+ BucketLogsPermissionFullControl BucketLogsPermission = "FULL_CONTROL"
+ BucketLogsPermissionRead BucketLogsPermission = "READ"
+ BucketLogsPermissionWrite BucketLogsPermission = "WRITE"
+)
+
+// Values returns all known values for BucketLogsPermission. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketLogsPermission) Values() []BucketLogsPermission {
+ return []BucketLogsPermission{
+ "FULL_CONTROL",
+ "READ",
+ "WRITE",
+ }
+}
+
+type BucketType string
+
+// Enum values for BucketType
+const (
+ BucketTypeDirectory BucketType = "Directory"
+)
+
+// Values returns all known values for BucketType. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketType) Values() []BucketType {
+ return []BucketType{
+ "Directory",
+ }
+}
+
+type BucketVersioningStatus string
+
+// Enum values for BucketVersioningStatus
+const (
+ BucketVersioningStatusEnabled BucketVersioningStatus = "Enabled"
+ BucketVersioningStatusSuspended BucketVersioningStatus = "Suspended"
+)
+
+// Values returns all known values for BucketVersioningStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BucketVersioningStatus) Values() []BucketVersioningStatus {
+ return []BucketVersioningStatus{
+ "Enabled",
+ "Suspended",
+ }
+}
+
+type ChecksumAlgorithm string
+
+// Enum values for ChecksumAlgorithm
+const (
+ ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32"
+ ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C"
+ ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1"
+ ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256"
+ ChecksumAlgorithmCrc64nvme ChecksumAlgorithm = "CRC64NVME"
+)
+
+// Values returns all known values for ChecksumAlgorithm. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ChecksumAlgorithm) Values() []ChecksumAlgorithm {
+ return []ChecksumAlgorithm{
+ "CRC32",
+ "CRC32C",
+ "SHA1",
+ "SHA256",
+ "CRC64NVME",
+ }
+}
+
+type ChecksumMode string
+
+// Enum values for ChecksumMode
+const (
+ ChecksumModeEnabled ChecksumMode = "ENABLED"
+)
+
+// Values returns all known values for ChecksumMode. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ChecksumMode) Values() []ChecksumMode {
+ return []ChecksumMode{
+ "ENABLED",
+ }
+}
+
+type ChecksumType string
+
+// Enum values for ChecksumType
+const (
+ ChecksumTypeComposite ChecksumType = "COMPOSITE"
+ ChecksumTypeFullObject ChecksumType = "FULL_OBJECT"
+)
+
+// Values returns all known values for ChecksumType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ChecksumType) Values() []ChecksumType {
+ return []ChecksumType{
+ "COMPOSITE",
+ "FULL_OBJECT",
+ }
+}
+
+type CompressionType string
+
+// Enum values for CompressionType
+const (
+ CompressionTypeNone CompressionType = "NONE"
+ CompressionTypeGzip CompressionType = "GZIP"
+ CompressionTypeBzip2 CompressionType = "BZIP2"
+)
+
+// Values returns all known values for CompressionType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (CompressionType) Values() []CompressionType {
+ return []CompressionType{
+ "NONE",
+ "GZIP",
+ "BZIP2",
+ }
+}
+
+type DataRedundancy string
+
+// Enum values for DataRedundancy
+const (
+ DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone"
+ DataRedundancySingleLocalZone DataRedundancy = "SingleLocalZone"
+)
+
+// Values returns all known values for DataRedundancy. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (DataRedundancy) Values() []DataRedundancy {
+ return []DataRedundancy{
+ "SingleAvailabilityZone",
+ "SingleLocalZone",
+ }
+}
+
+type DeleteMarkerReplicationStatus string
+
+// Enum values for DeleteMarkerReplicationStatus
+const (
+ DeleteMarkerReplicationStatusEnabled DeleteMarkerReplicationStatus = "Enabled"
+ DeleteMarkerReplicationStatusDisabled DeleteMarkerReplicationStatus = "Disabled"
+)
+
+// Values returns all known values for DeleteMarkerReplicationStatus. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus {
+ return []DeleteMarkerReplicationStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type EncodingType string
+
+// Enum values for EncodingType
+const (
+ EncodingTypeUrl EncodingType = "url"
+)
+
+// Values returns all known values for EncodingType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (EncodingType) Values() []EncodingType {
+ return []EncodingType{
+ "url",
+ }
+}
+
+type Event string
+
+// Enum values for Event
+const (
+ EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject"
+ EventS3ObjectCreated Event = "s3:ObjectCreated:*"
+ EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put"
+ EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post"
+ EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy"
+ EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload"
+ EventS3ObjectRemoved Event = "s3:ObjectRemoved:*"
+ EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete"
+ EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated"
+ EventS3ObjectRestore Event = "s3:ObjectRestore:*"
+ EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post"
+ EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed"
+ EventS3Replication Event = "s3:Replication:*"
+ EventS3ReplicationOperationFailedReplication Event = "s3:Replication:OperationFailedReplication"
+ EventS3ReplicationOperationNotTracked Event = "s3:Replication:OperationNotTracked"
+ EventS3ReplicationOperationMissedThreshold Event = "s3:Replication:OperationMissedThreshold"
+ EventS3ReplicationOperationReplicatedAfterThreshold Event = "s3:Replication:OperationReplicatedAfterThreshold"
+ EventS3ObjectRestoreDelete Event = "s3:ObjectRestore:Delete"
+ EventS3LifecycleTransition Event = "s3:LifecycleTransition"
+ EventS3IntelligentTiering Event = "s3:IntelligentTiering"
+ EventS3ObjectAclPut Event = "s3:ObjectAcl:Put"
+ EventS3LifecycleExpiration Event = "s3:LifecycleExpiration:*"
+ EventS3LifecycleExpirationDelete Event = "s3:LifecycleExpiration:Delete"
+ EventS3LifecycleExpirationDeleteMarkerCreated Event = "s3:LifecycleExpiration:DeleteMarkerCreated"
+ EventS3ObjectTagging Event = "s3:ObjectTagging:*"
+ EventS3ObjectTaggingPut Event = "s3:ObjectTagging:Put"
+ EventS3ObjectTaggingDelete Event = "s3:ObjectTagging:Delete"
+)
+
+// Values returns all known values for Event. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Event) Values() []Event {
+ return []Event{
+ "s3:ReducedRedundancyLostObject",
+ "s3:ObjectCreated:*",
+ "s3:ObjectCreated:Put",
+ "s3:ObjectCreated:Post",
+ "s3:ObjectCreated:Copy",
+ "s3:ObjectCreated:CompleteMultipartUpload",
+ "s3:ObjectRemoved:*",
+ "s3:ObjectRemoved:Delete",
+ "s3:ObjectRemoved:DeleteMarkerCreated",
+ "s3:ObjectRestore:*",
+ "s3:ObjectRestore:Post",
+ "s3:ObjectRestore:Completed",
+ "s3:Replication:*",
+ "s3:Replication:OperationFailedReplication",
+ "s3:Replication:OperationNotTracked",
+ "s3:Replication:OperationMissedThreshold",
+ "s3:Replication:OperationReplicatedAfterThreshold",
+ "s3:ObjectRestore:Delete",
+ "s3:LifecycleTransition",
+ "s3:IntelligentTiering",
+ "s3:ObjectAcl:Put",
+ "s3:LifecycleExpiration:*",
+ "s3:LifecycleExpiration:Delete",
+ "s3:LifecycleExpiration:DeleteMarkerCreated",
+ "s3:ObjectTagging:*",
+ "s3:ObjectTagging:Put",
+ "s3:ObjectTagging:Delete",
+ }
+}
+
+type ExistingObjectReplicationStatus string
+
+// Enum values for ExistingObjectReplicationStatus
+const (
+ ExistingObjectReplicationStatusEnabled ExistingObjectReplicationStatus = "Enabled"
+ ExistingObjectReplicationStatusDisabled ExistingObjectReplicationStatus = "Disabled"
+)
+
+// Values returns all known values for ExistingObjectReplicationStatus. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus {
+ return []ExistingObjectReplicationStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ExpirationStatus string
+
+// Enum values for ExpirationStatus
+const (
+ ExpirationStatusEnabled ExpirationStatus = "Enabled"
+ ExpirationStatusDisabled ExpirationStatus = "Disabled"
+)
+
+// Values returns all known values for ExpirationStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExpirationStatus) Values() []ExpirationStatus {
+ return []ExpirationStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ExpressionType string
+
+// Enum values for ExpressionType
+const (
+ ExpressionTypeSql ExpressionType = "SQL"
+)
+
+// Values returns all known values for ExpressionType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExpressionType) Values() []ExpressionType {
+ return []ExpressionType{
+ "SQL",
+ }
+}
+
+type FileHeaderInfo string
+
+// Enum values for FileHeaderInfo
+const (
+ FileHeaderInfoUse FileHeaderInfo = "USE"
+ FileHeaderInfoIgnore FileHeaderInfo = "IGNORE"
+ FileHeaderInfoNone FileHeaderInfo = "NONE"
+)
+
+// Values returns all known values for FileHeaderInfo. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (FileHeaderInfo) Values() []FileHeaderInfo {
+ return []FileHeaderInfo{
+ "USE",
+ "IGNORE",
+ "NONE",
+ }
+}
+
+type FilterRuleName string
+
+// Enum values for FilterRuleName
+const (
+ FilterRuleNamePrefix FilterRuleName = "prefix"
+ FilterRuleNameSuffix FilterRuleName = "suffix"
+)
+
+// Values returns all known values for FilterRuleName. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (FilterRuleName) Values() []FilterRuleName {
+ return []FilterRuleName{
+ "prefix",
+ "suffix",
+ }
+}
+
+type IntelligentTieringAccessTier string
+
+// Enum values for IntelligentTieringAccessTier
+const (
+ IntelligentTieringAccessTierArchiveAccess IntelligentTieringAccessTier = "ARCHIVE_ACCESS"
+ IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS"
+)
+
+// Values returns all known values for IntelligentTieringAccessTier. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier {
+ return []IntelligentTieringAccessTier{
+ "ARCHIVE_ACCESS",
+ "DEEP_ARCHIVE_ACCESS",
+ }
+}
+
+type IntelligentTieringStatus string
+
+// Enum values for IntelligentTieringStatus
+const (
+ IntelligentTieringStatusEnabled IntelligentTieringStatus = "Enabled"
+ IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled"
+)
+
+// Values returns all known values for IntelligentTieringStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (IntelligentTieringStatus) Values() []IntelligentTieringStatus {
+ return []IntelligentTieringStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type InventoryFormat string
+
+// Enum values for InventoryFormat
+const (
+ InventoryFormatCsv InventoryFormat = "CSV"
+ InventoryFormatOrc InventoryFormat = "ORC"
+ InventoryFormatParquet InventoryFormat = "Parquet"
+)
+
+// Values returns all known values for InventoryFormat. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryFormat) Values() []InventoryFormat {
+ return []InventoryFormat{
+ "CSV",
+ "ORC",
+ "Parquet",
+ }
+}
+
+type InventoryFrequency string
+
+// Enum values for InventoryFrequency
+const (
+ InventoryFrequencyDaily InventoryFrequency = "Daily"
+ InventoryFrequencyWeekly InventoryFrequency = "Weekly"
+)
+
+// Values returns all known values for InventoryFrequency. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryFrequency) Values() []InventoryFrequency {
+ return []InventoryFrequency{
+ "Daily",
+ "Weekly",
+ }
+}
+
+type InventoryIncludedObjectVersions string
+
+// Enum values for InventoryIncludedObjectVersions
+const (
+ InventoryIncludedObjectVersionsAll InventoryIncludedObjectVersions = "All"
+ InventoryIncludedObjectVersionsCurrent InventoryIncludedObjectVersions = "Current"
+)
+
+// Values returns all known values for InventoryIncludedObjectVersions. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions {
+ return []InventoryIncludedObjectVersions{
+ "All",
+ "Current",
+ }
+}
+
+type InventoryOptionalField string
+
+// Enum values for InventoryOptionalField
+const (
+ InventoryOptionalFieldSize InventoryOptionalField = "Size"
+ InventoryOptionalFieldLastModifiedDate InventoryOptionalField = "LastModifiedDate"
+ InventoryOptionalFieldStorageClass InventoryOptionalField = "StorageClass"
+ InventoryOptionalFieldETag InventoryOptionalField = "ETag"
+ InventoryOptionalFieldIsMultipartUploaded InventoryOptionalField = "IsMultipartUploaded"
+ InventoryOptionalFieldReplicationStatus InventoryOptionalField = "ReplicationStatus"
+ InventoryOptionalFieldEncryptionStatus InventoryOptionalField = "EncryptionStatus"
+ InventoryOptionalFieldObjectLockRetainUntilDate InventoryOptionalField = "ObjectLockRetainUntilDate"
+ InventoryOptionalFieldObjectLockMode InventoryOptionalField = "ObjectLockMode"
+ InventoryOptionalFieldObjectLockLegalHoldStatus InventoryOptionalField = "ObjectLockLegalHoldStatus"
+ InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier"
+ InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus"
+ InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm"
+ InventoryOptionalFieldObjectAccessControlList InventoryOptionalField = "ObjectAccessControlList"
+ InventoryOptionalFieldObjectOwner InventoryOptionalField = "ObjectOwner"
+)
+
+// Values returns all known values for InventoryOptionalField. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InventoryOptionalField) Values() []InventoryOptionalField {
+ return []InventoryOptionalField{
+ "Size",
+ "LastModifiedDate",
+ "StorageClass",
+ "ETag",
+ "IsMultipartUploaded",
+ "ReplicationStatus",
+ "EncryptionStatus",
+ "ObjectLockRetainUntilDate",
+ "ObjectLockMode",
+ "ObjectLockLegalHoldStatus",
+ "IntelligentTieringAccessTier",
+ "BucketKeyStatus",
+ "ChecksumAlgorithm",
+ "ObjectAccessControlList",
+ "ObjectOwner",
+ }
+}
+
+type JSONType string
+
+// Enum values for JSONType
+const (
+ JSONTypeDocument JSONType = "DOCUMENT"
+ JSONTypeLines JSONType = "LINES"
+)
+
+// Values returns all known values for JSONType. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (JSONType) Values() []JSONType {
+ return []JSONType{
+ "DOCUMENT",
+ "LINES",
+ }
+}
+
+type LocationType string
+
+// Enum values for LocationType
+const (
+ LocationTypeAvailabilityZone LocationType = "AvailabilityZone"
+ LocationTypeLocalZone LocationType = "LocalZone"
+)
+
+// Values returns all known values for LocationType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (LocationType) Values() []LocationType {
+ return []LocationType{
+ "AvailabilityZone",
+ "LocalZone",
+ }
+}
+
+type MetadataDirective string
+
+// Enum values for MetadataDirective
+const (
+ MetadataDirectiveCopy MetadataDirective = "COPY"
+ MetadataDirectiveReplace MetadataDirective = "REPLACE"
+)
+
+// Values returns all known values for MetadataDirective. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (MetadataDirective) Values() []MetadataDirective {
+ return []MetadataDirective{
+ "COPY",
+ "REPLACE",
+ }
+}
+
+type MetricsStatus string
+
+// Enum values for MetricsStatus
+const (
+ MetricsStatusEnabled MetricsStatus = "Enabled"
+ MetricsStatusDisabled MetricsStatus = "Disabled"
+)
+
+// Values returns all known values for MetricsStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (MetricsStatus) Values() []MetricsStatus {
+ return []MetricsStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type MFADelete string
+
+// Enum values for MFADelete
+const (
+ MFADeleteEnabled MFADelete = "Enabled"
+ MFADeleteDisabled MFADelete = "Disabled"
+)
+
+// Values returns all known values for MFADelete. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (MFADelete) Values() []MFADelete {
+ return []MFADelete{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type MFADeleteStatus string
+
+// Enum values for MFADeleteStatus
+const (
+ MFADeleteStatusEnabled MFADeleteStatus = "Enabled"
+ MFADeleteStatusDisabled MFADeleteStatus = "Disabled"
+)
+
+// Values returns all known values for MFADeleteStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (MFADeleteStatus) Values() []MFADeleteStatus {
+ return []MFADeleteStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ObjectAttributes string
+
+// Enum values for ObjectAttributes
+const (
+ ObjectAttributesEtag ObjectAttributes = "ETag"
+ ObjectAttributesChecksum ObjectAttributes = "Checksum"
+ ObjectAttributesObjectParts ObjectAttributes = "ObjectParts"
+ ObjectAttributesStorageClass ObjectAttributes = "StorageClass"
+ ObjectAttributesObjectSize ObjectAttributes = "ObjectSize"
+)
+
+// Values returns all known values for ObjectAttributes. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectAttributes) Values() []ObjectAttributes {
+ return []ObjectAttributes{
+ "ETag",
+ "Checksum",
+ "ObjectParts",
+ "StorageClass",
+ "ObjectSize",
+ }
+}
+
+type ObjectCannedACL string
+
+// Enum values for ObjectCannedACL
+const (
+ ObjectCannedACLPrivate ObjectCannedACL = "private"
+ ObjectCannedACLPublicRead ObjectCannedACL = "public-read"
+ ObjectCannedACLPublicReadWrite ObjectCannedACL = "public-read-write"
+ ObjectCannedACLAuthenticatedRead ObjectCannedACL = "authenticated-read"
+ ObjectCannedACLAwsExecRead ObjectCannedACL = "aws-exec-read"
+ ObjectCannedACLBucketOwnerRead ObjectCannedACL = "bucket-owner-read"
+ ObjectCannedACLBucketOwnerFullControl ObjectCannedACL = "bucket-owner-full-control"
+)
+
+// Values returns all known values for ObjectCannedACL. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectCannedACL) Values() []ObjectCannedACL {
+ return []ObjectCannedACL{
+ "private",
+ "public-read",
+ "public-read-write",
+ "authenticated-read",
+ "aws-exec-read",
+ "bucket-owner-read",
+ "bucket-owner-full-control",
+ }
+}
+
+type ObjectLockEnabled string
+
+// Enum values for ObjectLockEnabled
+const (
+ ObjectLockEnabledEnabled ObjectLockEnabled = "Enabled"
+)
+
+// Values returns all known values for ObjectLockEnabled. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockEnabled) Values() []ObjectLockEnabled {
+ return []ObjectLockEnabled{
+ "Enabled",
+ }
+}
+
+type ObjectLockLegalHoldStatus string
+
+// Enum values for ObjectLockLegalHoldStatus
+const (
+ ObjectLockLegalHoldStatusOn ObjectLockLegalHoldStatus = "ON"
+ ObjectLockLegalHoldStatusOff ObjectLockLegalHoldStatus = "OFF"
+)
+
+// Values returns all known values for ObjectLockLegalHoldStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus {
+ return []ObjectLockLegalHoldStatus{
+ "ON",
+ "OFF",
+ }
+}
+
+type ObjectLockMode string
+
+// Enum values for ObjectLockMode
+const (
+ ObjectLockModeGovernance ObjectLockMode = "GOVERNANCE"
+ ObjectLockModeCompliance ObjectLockMode = "COMPLIANCE"
+)
+
+// Values returns all known values for ObjectLockMode. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockMode) Values() []ObjectLockMode {
+ return []ObjectLockMode{
+ "GOVERNANCE",
+ "COMPLIANCE",
+ }
+}
+
+type ObjectLockRetentionMode string
+
+// Enum values for ObjectLockRetentionMode
+const (
+ ObjectLockRetentionModeGovernance ObjectLockRetentionMode = "GOVERNANCE"
+ ObjectLockRetentionModeCompliance ObjectLockRetentionMode = "COMPLIANCE"
+)
+
+// Values returns all known values for ObjectLockRetentionMode. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode {
+ return []ObjectLockRetentionMode{
+ "GOVERNANCE",
+ "COMPLIANCE",
+ }
+}
+
+type ObjectOwnership string
+
+// Enum values for ObjectOwnership
+const (
+ ObjectOwnershipBucketOwnerPreferred ObjectOwnership = "BucketOwnerPreferred"
+ ObjectOwnershipObjectWriter ObjectOwnership = "ObjectWriter"
+ ObjectOwnershipBucketOwnerEnforced ObjectOwnership = "BucketOwnerEnforced"
+)
+
+// Values returns all known values for ObjectOwnership. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectOwnership) Values() []ObjectOwnership {
+ return []ObjectOwnership{
+ "BucketOwnerPreferred",
+ "ObjectWriter",
+ "BucketOwnerEnforced",
+ }
+}
+
+type ObjectStorageClass string
+
+// Enum values for ObjectStorageClass
+const (
+ ObjectStorageClassStandard ObjectStorageClass = "STANDARD"
+ ObjectStorageClassReducedRedundancy ObjectStorageClass = "REDUCED_REDUNDANCY"
+ ObjectStorageClassGlacier ObjectStorageClass = "GLACIER"
+ ObjectStorageClassStandardIa ObjectStorageClass = "STANDARD_IA"
+ ObjectStorageClassOnezoneIa ObjectStorageClass = "ONEZONE_IA"
+ ObjectStorageClassIntelligentTiering ObjectStorageClass = "INTELLIGENT_TIERING"
+ ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE"
+ ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS"
+ ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR"
+ ObjectStorageClassSnow ObjectStorageClass = "SNOW"
+ ObjectStorageClassExpressOnezone ObjectStorageClass = "EXPRESS_ONEZONE"
+)
+
+// Values returns all known values for ObjectStorageClass. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectStorageClass) Values() []ObjectStorageClass {
+ return []ObjectStorageClass{
+ "STANDARD",
+ "REDUCED_REDUNDANCY",
+ "GLACIER",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "DEEP_ARCHIVE",
+ "OUTPOSTS",
+ "GLACIER_IR",
+ "SNOW",
+ "EXPRESS_ONEZONE",
+ }
+}
+
+type ObjectVersionStorageClass string
+
+// Enum values for ObjectVersionStorageClass
+const (
+ ObjectVersionStorageClassStandard ObjectVersionStorageClass = "STANDARD"
+)
+
+// Values returns all known values for ObjectVersionStorageClass. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass {
+ return []ObjectVersionStorageClass{
+ "STANDARD",
+ }
+}
+
+type OptionalObjectAttributes string
+
+// Enum values for OptionalObjectAttributes
+const (
+ OptionalObjectAttributesRestoreStatus OptionalObjectAttributes = "RestoreStatus"
+)
+
+// Values returns all known values for OptionalObjectAttributes. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (OptionalObjectAttributes) Values() []OptionalObjectAttributes {
+ return []OptionalObjectAttributes{
+ "RestoreStatus",
+ }
+}
+
+type OwnerOverride string
+
+// Enum values for OwnerOverride
+const (
+ OwnerOverrideDestination OwnerOverride = "Destination"
+)
+
+// Values returns all known values for OwnerOverride. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (OwnerOverride) Values() []OwnerOverride {
+ return []OwnerOverride{
+ "Destination",
+ }
+}
+
+type PartitionDateSource string
+
+// Enum values for PartitionDateSource
+const (
+ PartitionDateSourceEventTime PartitionDateSource = "EventTime"
+ PartitionDateSourceDeliveryTime PartitionDateSource = "DeliveryTime"
+)
+
+// Values returns all known values for PartitionDateSource. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (PartitionDateSource) Values() []PartitionDateSource {
+ return []PartitionDateSource{
+ "EventTime",
+ "DeliveryTime",
+ }
+}
+
+type Payer string
+
+// Enum values for Payer
+const (
+ PayerRequester Payer = "Requester"
+ PayerBucketOwner Payer = "BucketOwner"
+)
+
+// Values returns all known values for Payer. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Payer) Values() []Payer {
+ return []Payer{
+ "Requester",
+ "BucketOwner",
+ }
+}
+
+type Permission string
+
+// Enum values for Permission
+const (
+ PermissionFullControl Permission = "FULL_CONTROL"
+ PermissionWrite Permission = "WRITE"
+ PermissionWriteAcp Permission = "WRITE_ACP"
+ PermissionRead Permission = "READ"
+ PermissionReadAcp Permission = "READ_ACP"
+)
+
+// Values returns all known values for Permission. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Permission) Values() []Permission {
+ return []Permission{
+ "FULL_CONTROL",
+ "WRITE",
+ "WRITE_ACP",
+ "READ",
+ "READ_ACP",
+ }
+}
+
+type Protocol string
+
+// Enum values for Protocol
+const (
+ ProtocolHttp Protocol = "http"
+ ProtocolHttps Protocol = "https"
+)
+
+// Values returns all known values for Protocol. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Protocol) Values() []Protocol {
+ return []Protocol{
+ "http",
+ "https",
+ }
+}
+
+type QuoteFields string
+
+// Enum values for QuoteFields
+const (
+ QuoteFieldsAlways QuoteFields = "ALWAYS"
+ QuoteFieldsAsneeded QuoteFields = "ASNEEDED"
+)
+
+// Values returns all known values for QuoteFields. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (QuoteFields) Values() []QuoteFields {
+ return []QuoteFields{
+ "ALWAYS",
+ "ASNEEDED",
+ }
+}
+
+type ReplicaModificationsStatus string
+
+// Enum values for ReplicaModificationsStatus
+const (
+ ReplicaModificationsStatusEnabled ReplicaModificationsStatus = "Enabled"
+ ReplicaModificationsStatusDisabled ReplicaModificationsStatus = "Disabled"
+)
+
+// Values returns all known values for ReplicaModificationsStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus {
+ return []ReplicaModificationsStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ReplicationRuleStatus string
+
+// Enum values for ReplicationRuleStatus
+const (
+ ReplicationRuleStatusEnabled ReplicationRuleStatus = "Enabled"
+ ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled"
+)
+
+// Values returns all known values for ReplicationRuleStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicationRuleStatus) Values() []ReplicationRuleStatus {
+ return []ReplicationRuleStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type ReplicationStatus string
+
+// Enum values for ReplicationStatus
+const (
+ ReplicationStatusComplete ReplicationStatus = "COMPLETE"
+ ReplicationStatusPending ReplicationStatus = "PENDING"
+ ReplicationStatusFailed ReplicationStatus = "FAILED"
+ ReplicationStatusReplica ReplicationStatus = "REPLICA"
+ ReplicationStatusCompleted ReplicationStatus = "COMPLETED"
+)
+
+// Values returns all known values for ReplicationStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicationStatus) Values() []ReplicationStatus {
+ return []ReplicationStatus{
+ "COMPLETE",
+ "PENDING",
+ "FAILED",
+ "REPLICA",
+ "COMPLETED",
+ }
+}
+
+type ReplicationTimeStatus string
+
+// Enum values for ReplicationTimeStatus
+const (
+ ReplicationTimeStatusEnabled ReplicationTimeStatus = "Enabled"
+ ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled"
+)
+
+// Values returns all known values for ReplicationTimeStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicationTimeStatus) Values() []ReplicationTimeStatus {
+ return []ReplicationTimeStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type RequestCharged string
+
+// Enum values for RequestCharged
+const (
+ RequestChargedRequester RequestCharged = "requester"
+)
+
+// Values returns all known values for RequestCharged. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (RequestCharged) Values() []RequestCharged {
+ return []RequestCharged{
+ "requester",
+ }
+}
+
+type RequestPayer string
+
+// Enum values for RequestPayer
+const (
+ RequestPayerRequester RequestPayer = "requester"
+)
+
+// Values returns all known values for RequestPayer. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (RequestPayer) Values() []RequestPayer {
+ return []RequestPayer{
+ "requester",
+ }
+}
+
+type RestoreRequestType string
+
+// Enum values for RestoreRequestType
+const (
+ RestoreRequestTypeSelect RestoreRequestType = "SELECT"
+)
+
+// Values returns all known values for RestoreRequestType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (RestoreRequestType) Values() []RestoreRequestType {
+ return []RestoreRequestType{
+ "SELECT",
+ }
+}
+
+type ServerSideEncryption string
+
+// Enum values for ServerSideEncryption
+const (
+ ServerSideEncryptionAes256 ServerSideEncryption = "AES256"
+ ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms"
+ ServerSideEncryptionAwsKmsDsse ServerSideEncryption = "aws:kms:dsse"
+)
+
+// Values returns all known values for ServerSideEncryption. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ServerSideEncryption) Values() []ServerSideEncryption {
+ return []ServerSideEncryption{
+ "AES256",
+ "aws:kms",
+ "aws:kms:dsse",
+ }
+}
+
+type SessionMode string
+
+// Enum values for SessionMode
+const (
+ SessionModeReadOnly SessionMode = "ReadOnly"
+ SessionModeReadWrite SessionMode = "ReadWrite"
+)
+
+// Values returns all known values for SessionMode. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (SessionMode) Values() []SessionMode {
+ return []SessionMode{
+ "ReadOnly",
+ "ReadWrite",
+ }
+}
+
+type SseKmsEncryptedObjectsStatus string
+
+// Enum values for SseKmsEncryptedObjectsStatus
+const (
+ SseKmsEncryptedObjectsStatusEnabled SseKmsEncryptedObjectsStatus = "Enabled"
+ SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled"
+)
+
+// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus {
+ return []SseKmsEncryptedObjectsStatus{
+ "Enabled",
+ "Disabled",
+ }
+}
+
+type StorageClass string
+
+// Enum values for StorageClass
+const (
+ StorageClassStandard StorageClass = "STANDARD"
+ StorageClassReducedRedundancy StorageClass = "REDUCED_REDUNDANCY"
+ StorageClassStandardIa StorageClass = "STANDARD_IA"
+ StorageClassOnezoneIa StorageClass = "ONEZONE_IA"
+ StorageClassIntelligentTiering StorageClass = "INTELLIGENT_TIERING"
+ StorageClassGlacier StorageClass = "GLACIER"
+ StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE"
+ StorageClassOutposts StorageClass = "OUTPOSTS"
+ StorageClassGlacierIr StorageClass = "GLACIER_IR"
+ StorageClassSnow StorageClass = "SNOW"
+ StorageClassExpressOnezone StorageClass = "EXPRESS_ONEZONE"
+)
+
+// Values returns all known values for StorageClass. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (StorageClass) Values() []StorageClass {
+ return []StorageClass{
+ "STANDARD",
+ "REDUCED_REDUNDANCY",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "GLACIER",
+ "DEEP_ARCHIVE",
+ "OUTPOSTS",
+ "GLACIER_IR",
+ "SNOW",
+ "EXPRESS_ONEZONE",
+ }
+}
+
+type StorageClassAnalysisSchemaVersion string
+
+// Enum values for StorageClassAnalysisSchemaVersion
+const (
+ StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1"
+)
+
+// Values returns all known values for StorageClassAnalysisSchemaVersion. Note
+// that this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion {
+ return []StorageClassAnalysisSchemaVersion{
+ "V_1",
+ }
+}
+
+type TaggingDirective string
+
+// Enum values for TaggingDirective
+const (
+ TaggingDirectiveCopy TaggingDirective = "COPY"
+ TaggingDirectiveReplace TaggingDirective = "REPLACE"
+)
+
+// Values returns all known values for TaggingDirective. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (TaggingDirective) Values() []TaggingDirective {
+ return []TaggingDirective{
+ "COPY",
+ "REPLACE",
+ }
+}
+
+type Tier string
+
+// Enum values for Tier
+const (
+ TierStandard Tier = "Standard"
+ TierBulk Tier = "Bulk"
+ TierExpedited Tier = "Expedited"
+)
+
+// Values returns all known values for Tier. Note that this can be expanded in the
+// future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Tier) Values() []Tier {
+ return []Tier{
+ "Standard",
+ "Bulk",
+ "Expedited",
+ }
+}
+
+type TransitionDefaultMinimumObjectSize string
+
+// Enum values for TransitionDefaultMinimumObjectSize
+const (
+ TransitionDefaultMinimumObjectSizeVariesByStorageClass TransitionDefaultMinimumObjectSize = "varies_by_storage_class"
+ TransitionDefaultMinimumObjectSizeAllStorageClasses128k TransitionDefaultMinimumObjectSize = "all_storage_classes_128K"
+)
+
+// Values returns all known values for TransitionDefaultMinimumObjectSize. Note
+// that this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (TransitionDefaultMinimumObjectSize) Values() []TransitionDefaultMinimumObjectSize {
+ return []TransitionDefaultMinimumObjectSize{
+ "varies_by_storage_class",
+ "all_storage_classes_128K",
+ }
+}
+
+type TransitionStorageClass string
+
+// Enum values for TransitionStorageClass
+const (
+ TransitionStorageClassGlacier TransitionStorageClass = "GLACIER"
+ TransitionStorageClassStandardIa TransitionStorageClass = "STANDARD_IA"
+ TransitionStorageClassOnezoneIa TransitionStorageClass = "ONEZONE_IA"
+ TransitionStorageClassIntelligentTiering TransitionStorageClass = "INTELLIGENT_TIERING"
+ TransitionStorageClassDeepArchive TransitionStorageClass = "DEEP_ARCHIVE"
+ TransitionStorageClassGlacierIr TransitionStorageClass = "GLACIER_IR"
+)
+
+// Values returns all known values for TransitionStorageClass. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (TransitionStorageClass) Values() []TransitionStorageClass {
+ return []TransitionStorageClass{
+ "GLACIER",
+ "STANDARD_IA",
+ "ONEZONE_IA",
+ "INTELLIGENT_TIERING",
+ "DEEP_ARCHIVE",
+ "GLACIER_IR",
+ }
+}
+
+type Type string
+
+// Enum values for Type
+const (
+ TypeCanonicalUser Type = "CanonicalUser"
+ TypeAmazonCustomerByEmail Type = "AmazonCustomerByEmail"
+ TypeGroup Type = "Group"
+)
+
+// Values returns all known values for Type. Note that this can be expanded in the
+// future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Type) Values() []Type {
+ return []Type{
+ "CanonicalUser",
+ "AmazonCustomerByEmail",
+ "Group",
+ }
+}
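Every enum in enums.go pairs typed string constants with a Values() method declared on the type itself; because the receiver is a value receiver, a zero value is enough to call it, and the generated docs warn that the list is only as current as the client build. A minimal usage sketch under those assumptions:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    func main() {
        // List the storage classes this client build knows about; newer
        // service-side values may be absent, as the generated comments note.
        for _, sc := range types.StorageClass("").Values() {
            fmt.Println(sc)
        }
    }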
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
new file mode 100644
index 000000000..1070573a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go
@@ -0,0 +1,382 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// The requested bucket name is not available. The bucket namespace is shared by
+// all users of the system. Select a different name and try again.
+type BucketAlreadyExists struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *BucketAlreadyExists) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *BucketAlreadyExists) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *BucketAlreadyExists) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "BucketAlreadyExists"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The bucket you tried to create already exists, and you own it. Amazon S3
+// returns this error in all Amazon Web Services Regions except in the North
+// Virginia Region. For legacy compatibility, if you re-create an existing bucket
+// that you already own in the North Virginia Region, Amazon S3 returns 200 OK and
+// resets the bucket access control lists (ACLs).
+type BucketAlreadyOwnedByYou struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *BucketAlreadyOwnedByYou) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *BucketAlreadyOwnedByYou) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *BucketAlreadyOwnedByYou) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "BucketAlreadyOwnedByYou"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The existing object was created with a different encryption type.
+//
+// Subsequent write requests must include the appropriate encryption parameters
+// in the request or while creating the session.
+type EncryptionTypeMismatch struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *EncryptionTypeMismatch) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *EncryptionTypeMismatch) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *EncryptionTypeMismatch) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "EncryptionTypeMismatch"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *EncryptionTypeMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Object is archived and inaccessible until restored.
+//
+// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+// storage class, the S3 Glacier Deep Archive storage class, the S3
+// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep
+// Archive Access tier, before you can retrieve the object you must first restore a
+// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For
+// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide.
+//
+// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
+// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html
+type InvalidObjectState struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ StorageClass StorageClass
+ AccessTier IntelligentTieringAccessTier
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidObjectState) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidObjectState) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidObjectState) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidObjectState"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// You may receive this error in multiple cases. Depending on the reason for the
+// error, you may receive one of the messages below:
+//
+// - Cannot specify both a write offset value and user-defined object metadata
+// for existing objects.
+//
+// - Checksum Type mismatch occurred, expected checksum Type: sha1, actual
+// checksum Type: crc32c.
+//
+// - Request body cannot be empty when 'write offset' is specified.
+type InvalidRequest struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRequest) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequest) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRequest) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequest"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRequest) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The write offset value that you specified does not match the current object
+// size.
+type InvalidWriteOffset struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidWriteOffset) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidWriteOffset) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidWriteOffset) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidWriteOffset"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidWriteOffset) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified bucket does not exist.
+type NoSuchBucket struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *NoSuchBucket) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NoSuchBucket) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NoSuchBucket) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "NoSuchBucket"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *NoSuchBucket) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified key does not exist.
+type NoSuchKey struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *NoSuchKey) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NoSuchKey) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NoSuchKey) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "NoSuchKey"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *NoSuchKey) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified multipart upload does not exist.
+type NoSuchUpload struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *NoSuchUpload) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NoSuchUpload) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NoSuchUpload) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "NoSuchUpload"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *NoSuchUpload) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified content does not exist.
+type NotFound struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *NotFound) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *NotFound) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *NotFound) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "NotFound"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *NotFound) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// This action is not allowed against this storage tier.
+type ObjectAlreadyInActiveTierError struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ObjectAlreadyInActiveTierError) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ObjectAlreadyInActiveTierError) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ObjectAlreadyInActiveTierError) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ObjectAlreadyInActiveTierError"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ObjectAlreadyInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The source object of the COPY action is not in the active tier and is only
+// stored in Amazon S3 Glacier.
+type ObjectNotInActiveTierError struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ObjectNotInActiveTierError) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ObjectNotInActiveTierError) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ObjectNotInActiveTierError) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ObjectNotInActiveTierError"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// You have attempted to add more parts than the maximum of 10000 that are
+// allowed for this object. You can use the CopyObject operation to copy this
+// object to another and then add more data to the newly copied object.
+type TooManyParts struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TooManyParts) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TooManyParts) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TooManyParts) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TooManyParts"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TooManyParts) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
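Each error type in errors.go carries an optional Message plus an ErrorCodeOverride and reports smithy.FaultClient, so callers are expected to match them with errors.As rather than by string comparison. A minimal sketch of that pattern (client construction omitted; the function name is illustrative, not from this patch — note that GetObject only models NoSuchKey and InvalidObjectState as typed errors):

    import (
        "context"
        "errors"

        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // objectExists treats NoSuchKey as "not found" rather than as a failure.
    func objectExists(ctx context.Context, c *s3.Client, bucket, key string) (bool, error) {
        out, err := c.GetObject(ctx, &s3.GetObjectInput{Bucket: &bucket, Key: &key})
        if err != nil {
            var noKey *types.NoSuchKey
            if errors.As(err, &noKey) {
                return false, nil
            }
            return false, err
        }
        out.Body.Close() // the body is unused here; close it to release the connection
        return true, nil
    }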
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
new file mode 100644
index 000000000..2d5a284a2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go
@@ -0,0 +1,4477 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ smithydocument "github.com/aws/smithy-go/document"
+ "time"
+)
+
+// Specifies the days since the initiation of an incomplete multipart upload that
+// Amazon S3 will wait before permanently removing all parts of the upload. For
+// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide.
+//
+// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+type AbortIncompleteMultipartUpload struct {
+
+ // Specifies the number of days after which Amazon S3 aborts an incomplete
+ // multipart upload.
+ DaysAfterInitiation *int32
+
+ noSmithyDocumentSerde
+}
+
+// Configures the transfer acceleration state for an Amazon S3 bucket. For more
+// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide.
+//
+// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+type AccelerateConfiguration struct {
+
+ // Specifies the transfer acceleration status of the bucket.
+ Status BucketAccelerateStatus
+
+ noSmithyDocumentSerde
+}
+
+// Contains the elements that set the ACL permissions for an object per grantee.
+type AccessControlPolicy struct {
+
+ // A list of grants.
+ Grants []Grant
+
+ // Container for the bucket owner's display name and ID.
+ Owner *Owner
+
+ noSmithyDocumentSerde
+}
+
+// A container for information about access control for replicas.
+type AccessControlTranslation struct {
+
+ // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the
+ // Amazon S3 API Reference.
+ //
+ // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html
+ //
+ // This member is required.
+ Owner OwnerOverride
+
+ noSmithyDocumentSerde
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates in any
+// combination, and an object must match all of the predicates for the filter to
+// apply.
+type AnalyticsAndOperator struct {
+
+ // The prefix to use when evaluating an AND predicate: The prefix that an object
+ // must have to be included in the metrics results.
+ Prefix *string
+
+ // The list of tags to use when evaluating an AND predicate.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the configuration and any analyses for the analytics filter of an
+// Amazon S3 bucket.
+type AnalyticsConfiguration struct {
+
+ // The ID that identifies the analytics configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Contains data related to access patterns to be collected and made available to
+ // analyze the tradeoffs between different storage classes.
+ //
+ // This member is required.
+ StorageClassAnalysis *StorageClassAnalysis
+
+ // The filter used to describe a set of objects for analyses. A filter must have
+ // exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no
+ // filter is provided, all objects will be considered in any analysis.
+ Filter AnalyticsFilter
+
+ noSmithyDocumentSerde
+}
+
+// Where to publish the analytics results.
+type AnalyticsExportDestination struct {
+
+ // A destination signifying output to an S3 bucket.
+ //
+ // This member is required.
+ S3BucketDestination *AnalyticsS3BucketDestination
+
+ noSmithyDocumentSerde
+}
+
+// The filter used to describe a set of objects for analyses. A filter must have
+// exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). If no
+// filter is provided, all objects will be considered in any analysis.
+//
+// The following types satisfy this interface:
+//
+// AnalyticsFilterMemberAnd
+// AnalyticsFilterMemberPrefix
+// AnalyticsFilterMemberTag
+type AnalyticsFilter interface {
+ isAnalyticsFilter()
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating an
+// analytics filter. The operator must have at least two predicates.
+type AnalyticsFilterMemberAnd struct {
+ Value AnalyticsAndOperator
+
+ noSmithyDocumentSerde
+}
+
+func (*AnalyticsFilterMemberAnd) isAnalyticsFilter() {}
+
+// The prefix to use when evaluating an analytics filter.
+type AnalyticsFilterMemberPrefix struct {
+ Value string
+
+ noSmithyDocumentSerde
+}
+
+func (*AnalyticsFilterMemberPrefix) isAnalyticsFilter() {}
+
+// The tag to use when evaluating an analytics filter.
+type AnalyticsFilterMemberTag struct {
+ Value Tag
+
+ noSmithyDocumentSerde
+}
+
+func (*AnalyticsFilterMemberTag) isAnalyticsFilter() {}
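+
+// A minimal construction sketch for the union above: exactly one member wraps
+// the value, and a pointer to it satisfies the AnalyticsFilter interface (the
+// configuration shown is illustrative; StorageClassAnalysis is left empty
+// here only for brevity):
+//
+//	cfg := types.AnalyticsConfiguration{
+//		Id:                   aws.String("docs-analysis"),
+//		StorageClassAnalysis: &types.StorageClassAnalysis{},
+//		Filter:               &types.AnalyticsFilterMemberPrefix{Value: "documents/"},
+//	}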
+
+// Contains information about where to publish the analytics results.
+type AnalyticsS3BucketDestination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket to which data is exported.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the file format used when exporting data to Amazon S3.
+ //
+ // This member is required.
+ Format AnalyticsS3ExportFileFormat
+
+ // The account ID that owns the destination S3 bucket. If no account ID is
+ // provided, the owner is not validated before exporting data.
+ //
+ // Although this value is optional, we strongly recommend that you set it to help
+ // prevent problems if the destination bucket ownership changes.
+ BucketAccountId *string
+
+ // The prefix to use when exporting data. The prefix is prepended to all results.
+ Prefix *string
+
+ noSmithyDocumentSerde
+}
+
+// In terms of implementation, a Bucket is a resource.
+type Bucket struct {
+
+ // BucketRegion indicates the Amazon Web Services region where the bucket is
+ // located. If the request contains at least one valid parameter, it is included in
+ // the response.
+ BucketRegion *string
+
+ // Date the bucket was created. This date can change when making changes to your
+ // bucket, such as editing its bucket policy.
+ CreationDate *time.Time
+
+ // The name of the bucket.
+ Name *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the information about the bucket that will be created. For more
+// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+//
+// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+type BucketInfo struct {
+
+	// The number of Zones (Availability Zones or Local Zones) used for redundancy
+ // for the bucket.
+ DataRedundancy DataRedundancy
+
+ // The type of bucket.
+ Type BucketType
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For
+// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide.
+//
+// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
+type BucketLifecycleConfiguration struct {
+
+ // A lifecycle rule for individual objects in an Amazon S3 bucket.
+ //
+ // This member is required.
+ Rules []LifecycleRule
+
+ noSmithyDocumentSerde
+}
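+
+// A minimal configuration sketch tying this to the
+// AbortIncompleteMultipartUpload type above (a rule filter or prefix, which
+// the service also expects, is omitted for brevity):
+//
+//	lc := &types.BucketLifecycleConfiguration{
+//		Rules: []types.LifecycleRule{{
+//			ID:     aws.String("abort-stale-mpu"),
+//			Status: types.ExpirationStatusEnabled,
+//			AbortIncompleteMultipartUpload: &types.AbortIncompleteMultipartUpload{
+//				DaysAfterInitiation: aws.Int32(7),
+//			},
+//		}},
+//	}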
+
+// Container for logging status information.
+type BucketLoggingStatus struct {
+
+ // Describes where logs are stored and the prefix that Amazon S3 assigns to all
+ // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API
+ // Reference.
+ //
+ // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
+ LoggingEnabled *LoggingEnabled
+
+ noSmithyDocumentSerde
+}
+
+// Contains all the possible checksum or digest values for an object.
+type Checksum struct {
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only
+	// present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is
+ // present if the object was uploaded with the CRC64NVME checksum algorithm, or if
+ // the object was uploaded without a checksum (and Amazon S3 added the default
+ // checksum, CRC64NVME , to the uploaded object). For more information, see [Checking object integrity] in
+ // the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+	// present if the digest was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+	// present if the digest was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+ ChecksumSHA256 *string
+
+ // The checksum type that is used to calculate the object’s checksum value. For
+ // more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType ChecksumType
+
+ noSmithyDocumentSerde
+}
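+
+// A minimal retrieval sketch: these values are populated, for example, by
+// GetObjectAttributes when the Checksum attribute is requested (client and
+// ctx are assumed):
+//
+//	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
+//		Bucket:           aws.String("example-bucket"),
+//		Key:              aws.String("a.txt"),
+//		ObjectAttributes: []types.ObjectAttributes{types.ObjectAttributesChecksum},
+//	})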
+
+// Container for all (if there are any) keys between Prefix and the next
+// occurrence of the string specified by a delimiter. CommonPrefixes lists keys
+// that act like subdirectories in the directory specified by Prefix. For example,
+// if the prefix is notes/ and the delimiter is a slash (/) as in
+// notes/summer/july, the common prefix is notes/summer/.
+type CommonPrefix struct {
+
+ // Container for the specified common prefix.
+ Prefix *string
+
+ noSmithyDocumentSerde
+}
+
+// The container for the completed multipart upload details.
+type CompletedMultipartUpload struct {
+
+ // Array of CompletedPart data types.
+ //
+ // If you do not supply a valid Part with your request, the service sends back an
+ // HTTP 400 response.
+ Parts []CompletedPart
+
+ noSmithyDocumentSerde
+}
+
+// Details of the parts that were uploaded.
+type CompletedPart struct {
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present
+ // if the multipart upload request was created with the CRC32 checksum algorithm.
+ // For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is
+ // present if the multipart upload request was created with the CRC32C checksum
+ // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is
+ // present if the multipart upload request was created with the CRC64NVME checksum
+	// algorithm. For more information, see [Checking object integrity]in the Amazon S3
+ // User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present
+ // if the multipart upload request was created with the SHA1 checksum algorithm.
+ // For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is
+ // present if the multipart upload request was created with the SHA256 checksum
+ // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string
+
+ // Part number that identifies the part. This is a positive integer between 1 and
+ // 10,000.
+ //
+	//   - General purpose buckets - In CompleteMultipartUpload , when an additional
+ // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
+ // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the
+ // PartNumber must start at 1 and the part numbers must be consecutive.
+ // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an
+ // InvalidPartOrder error code.
+ //
+ // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start
+ // at 1 and the part numbers must be consecutive.
+ PartNumber *int32
+
+ noSmithyDocumentSerde
+}
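+
+// A minimal completion sketch: collect the ETag and PartNumber returned by
+// each UploadPart call and replay them here (client, ctx, uploadID, etag1,
+// and etag2 are assumed to come from the surrounding program):
+//
+//	_, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
+//		Bucket:   aws.String("example-bucket"),
+//		Key:      aws.String("big.bin"),
+//		UploadId: uploadID,
+//		MultipartUpload: &types.CompletedMultipartUpload{
+//			Parts: []types.CompletedPart{
+//				{ETag: etag1, PartNumber: aws.Int32(1)},
+//				{ETag: etag2, PartNumber: aws.Int32(2)},
+//			},
+//		},
+//	})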
+
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example: 1. If the request is for pages in the /docs
+// folder, redirect to the /documents folder. 2. If the request results in an
+// HTTP 4xx error, redirect the request to another host where you might process
+// the error.
+type Condition struct {
+
+ // The HTTP error code when the redirect is applied. In the event of an error, if
+ // the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html , the key prefix will be ExamplePage.html
+	// . To redirect requests for all pages with the prefix docs/ , the key prefix will
+	// be docs/ , which identifies all objects in the docs/ folder. Required when the
+ // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for the
+ // redirect to be applied.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ KeyPrefixEquals *string
+
+ noSmithyDocumentSerde
+}
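+
+// A minimal routing sketch: as part of a bucket website configuration, a
+// Condition pairs with a Redirect (defined later in this file) to send 404s
+// under the docs/ prefix to a fallback host (the host name is illustrative):
+//
+//	rule := types.RoutingRule{
+//		Condition: &types.Condition{
+//			KeyPrefixEquals:             aws.String("docs/"),
+//			HttpErrorCodeReturnedEquals: aws.String("404"),
+//		},
+//		Redirect: &types.Redirect{HostName: aws.String("fallback.example.com")},
+//	}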
+
+type ContinuationEvent struct {
+ noSmithyDocumentSerde
+}
+
+// Container for all response elements.
+type CopyObjectResult struct {
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only
+	// present if the checksum was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be
+	// present if the checksum was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is
+ // present if the object being copied was uploaded with the CRC64NVME checksum
+ // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added
+ // the default checksum, CRC64NVME , to the uploaded object). For more information,
+ // see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+	// present if the digest was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+	// present if the digest was uploaded with the object. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // The checksum type that is used to calculate the object’s checksum value. For
+ // more information, see [Checking object integrity]in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType ChecksumType
+
+ // Returns the ETag of the new object. The ETag reflects only changes to the
+ // contents of an object, not its metadata.
+ ETag *string
+
+ // Creation date of the object.
+ LastModified *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// Container for all response elements.
+type CopyPartResult struct {
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32 checksum of the part. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 32-bit CRC32C checksum of the part. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is
+ // present if the multipart upload request was created with the CRC64NVME checksum
+	// algorithm. For more information, see [Checking object integrity]in the Amazon S3
+ // User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 160-bit SHA1 checksum of the part. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies the
+ // Base64 encoded, 256-bit SHA256 checksum of the part. For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // Entity tag of the object.
+ ETag *string
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// Describes the cross-origin access configuration for objects in an Amazon S3
+// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide.
+//
+// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html
+type CORSConfiguration struct {
+
+ // A set of origins and methods (cross-origin access that you want to allow). You
+ // can add up to 100 rules to the configuration.
+ //
+ // This member is required.
+ CORSRules []CORSRule
+
+ noSmithyDocumentSerde
+}
+
+// Specifies a cross-origin access rule for an Amazon S3 bucket.
+type CORSRule struct {
+
+ // An HTTP method that you allow the origin to execute. Valid values are GET , PUT
+ // , HEAD , POST , and DELETE .
+ //
+ // This member is required.
+ AllowedMethods []string
+
+ // One or more origins you want customers to be able to access the bucket from.
+ //
+ // This member is required.
+ AllowedOrigins []string
+
+ // Headers that are specified in the Access-Control-Request-Headers header. These
+ // headers are allowed in a preflight OPTIONS request. In response to any preflight
+ // OPTIONS request, Amazon S3 returns any requested headers that are allowed.
+ AllowedHeaders []string
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []string
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string
+
+ // The time in seconds that your browser is to cache the preflight response for
+ // the specified resource.
+ MaxAgeSeconds *int32
+
+ noSmithyDocumentSerde
+}
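+
+// A minimal CORS sketch: allow browser GETs from a single origin and cache
+// the preflight response for an hour (the origin is illustrative):
+//
+//	cors := &types.CORSConfiguration{
+//		CORSRules: []types.CORSRule{{
+//			AllowedMethods: []string{"GET"},
+//			AllowedOrigins: []string{"https://www.example.com"},
+//			AllowedHeaders: []string{"*"},
+//			MaxAgeSeconds:  aws.Int32(3600),
+//		}},
+//	}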
+
+// The configuration information for the bucket.
+type CreateBucketConfiguration struct {
+
+ // Specifies the information about the bucket that will be created.
+ //
+ // This functionality is only supported by directory buckets.
+ Bucket *BucketInfo
+
+ // Specifies the location where the bucket will be created.
+ //
+ // Directory buckets - The location type is Availability Zone or Local Zone. To
+ // use the Local Zone location type, your account must be enabled for Dedicated
+ // Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code
+ // AccessDenied . To learn more, see [Enable accounts for Dedicated Local Zones] in the Amazon S3 User Guide.
+ //
+ // This functionality is only supported by directory buckets.
+ //
+ // [Enable accounts for Dedicated Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/opt-in-directory-bucket-lz.html
+ Location *LocationInfo
+
+ // Specifies the Region where the bucket will be created. You might choose a
+ // Region to optimize latency, minimize costs, or address regulatory requirements.
+ // For example, if you reside in Europe, you will probably find it advantageous to
+ // create buckets in the Europe (Ireland) Region.
+ //
+ // If you don't specify a Region, the bucket is created in the US East (N.
+ // Virginia) Region (us-east-1) by default. Configurations using the value EU will
+ // create a bucket in eu-west-1 .
+ //
+ // For a list of the valid values for all of the Amazon Web Services Regions, see [Regions and Endpoints].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ LocationConstraint BucketLocationConstraint
+
+ noSmithyDocumentSerde
+}
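+
+// A minimal creation sketch: outside us-east-1 the target Region must be
+// stated through LocationConstraint (client and ctx are assumed):
+//
+//	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
+//		Bucket: aws.String("example-bucket"),
+//		CreateBucketConfiguration: &types.CreateBucketConfiguration{
+//			LocationConstraint: types.BucketLocationConstraintEuWest1,
+//		},
+//	})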
+
+// Describes how an uncompressed comma-separated values (CSV)-formatted input
+// object is formatted.
+type CSVInput struct {
+
+ // Specifies that CSV field values may contain quoted record delimiters and such
+ // records should be allowed. Default value is FALSE. Setting this value to TRUE
+ // may lower performance.
+ AllowQuotedRecordDelimiter *bool
+
+ // A single character used to indicate that a row should be ignored when the
+ // character is present at the start of that row. You can specify any character to
+ // indicate a comment line. The default character is # .
+ //
+ // Default: #
+ Comments *string
+
+ // A single character used to separate individual fields in a record. You can
+ // specify an arbitrary delimiter.
+ FieldDelimiter *string
+
+ // Describes the first line of input. Valid values are:
+ //
+ // - NONE : First line is not a header.
+ //
+ // - IGNORE : First line is a header, but you can't use the header values to
+ // indicate the column in an expression. You can use column position (such as _1,
+ // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ).
+ //
+ // - Use : First line is a header, and you can use the header value to identify a
+ // column in an expression ( SELECT "name" FROM OBJECT ).
+ FileHeaderInfo FileHeaderInfo
+
+ // A single character used for escaping when the field delimiter is part of the
+ // value. For example, if the value is a, b , Amazon S3 wraps this field value in
+ // quotation marks, as follows: " a , b " .
+ //
+ // Type: String
+ //
+ // Default: "
+ //
+ // Ancestors: CSV
+ QuoteCharacter *string
+
+ // A single character used for escaping the quotation mark character inside an
+ // already escaped value. For example, the value """ a , b """ is parsed as " a ,
+ // b " .
+ QuoteEscapeCharacter *string
+
+ // A single character used to separate individual records in the input. Instead of
+ // the default value, you can specify an arbitrary delimiter.
+ RecordDelimiter *string
+
+ noSmithyDocumentSerde
+}
+
+// Describes how uncompressed comma-separated values (CSV)-formatted results are
+// formatted.
+type CSVOutput struct {
+
+ // The value used to separate individual fields in a record. You can specify an
+ // arbitrary delimiter.
+ FieldDelimiter *string
+
+ // A single character used for escaping when the field delimiter is part of the
+ // value. For example, if the value is a, b , Amazon S3 wraps this field value in
+ // quotation marks, as follows: " a , b " .
+ QuoteCharacter *string
+
+ // The single character used for escaping the quote character inside an already
+ // escaped value.
+ QuoteEscapeCharacter *string
+
+ // Indicates whether to use quotation marks around output fields.
+ //
+ // - ALWAYS : Always use quotation marks for output fields.
+ //
+ // - ASNEEDED : Use quotation marks for output fields when needed.
+ QuoteFields QuoteFields
+
+ // A single character used to separate individual records in the output. Instead
+ // of the default value, you can specify an arbitrary delimiter.
+ RecordDelimiter *string
+
+ noSmithyDocumentSerde
+}
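+
+// A minimal S3 Select sketch: the CSV types above plug into the
+// InputSerialization and OutputSerialization containers defined later in this
+// file, e.g. to read a headered CSV and quote output fields only when needed:
+//
+//	in := &types.InputSerialization{
+//		CSV: &types.CSVInput{
+//			FileHeaderInfo: types.FileHeaderInfoUse,
+//			FieldDelimiter: aws.String(","),
+//		},
+//	}
+//	out := &types.OutputSerialization{
+//		CSV: &types.CSVOutput{QuoteFields: types.QuoteFieldsAsneeded},
+//	}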
+
+// The container element for optionally specifying the default Object Lock
+// retention settings for new objects placed in the specified bucket.
+//
+// - The DefaultRetention settings require both a mode and a period.
+//
+// - The DefaultRetention period can be either Days or Years but you must select
+// one. You cannot specify Days and Years at the same time.
+type DefaultRetention struct {
+
+ // The number of days that you want to specify for the default retention period.
+ // Must be used with Mode .
+ Days *int32
+
+ // The default Object Lock retention mode you want to apply to new objects placed
+ // in the specified bucket. Must be used with either Days or Years .
+ Mode ObjectLockRetentionMode
+
+ // The number of years that you want to specify for the default retention period.
+ // Must be used with Mode .
+ Years *int32
+
+ noSmithyDocumentSerde
+}
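+
+// A minimal Object Lock sketch: a bucket default of 30 days in COMPLIANCE
+// mode; per the note above, Days and Years are mutually exclusive, so only
+// one is set:
+//
+//	cfg := &types.ObjectLockConfiguration{
+//		ObjectLockEnabled: types.ObjectLockEnabledEnabled,
+//		Rule: &types.ObjectLockRule{
+//			DefaultRetention: &types.DefaultRetention{
+//				Mode: types.ObjectLockRetentionModeCompliance,
+//				Days: aws.Int32(30),
+//			},
+//		},
+//	}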
+
+// Container for the objects to delete.
+type Delete struct {
+
+ // The object to delete.
+ //
+ // Directory buckets - For directory buckets, an object that's composed entirely
+ // of whitespace characters is not supported by the DeleteObjects API operation.
+ // The request will receive a 400 Bad Request error and none of the objects in the
+ // request will be deleted.
+ //
+ // This member is required.
+ Objects []ObjectIdentifier
+
+ // Element to enable quiet mode for the request. When you add this element, you
+ // must set its value to true .
+ Quiet *bool
+
+ noSmithyDocumentSerde
+}
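+
+// A minimal batch-delete sketch: Quiet suppresses per-key success entries in
+// the response, leaving only errors (client and ctx are assumed):
+//
+//	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"),
+//		Delete: &types.Delete{
+//			Objects: []types.ObjectIdentifier{
+//				{Key: aws.String("logs/a.txt")},
+//				{Key: aws.String("logs/b.txt")},
+//			},
+//			Quiet: aws.Bool(true),
+//		},
+//	})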
+
+// Information about the deleted object.
+type DeletedObject struct {
+
+ // Indicates whether the specified object version that was permanently deleted was
+ // (true) or was not (false) a delete marker before deletion. In a simple DELETE,
+ // this header indicates whether (true) or not (false) the current version of the
+ // object is a delete marker. To learn more about delete markers, see [Working with delete markers].
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html
+ DeleteMarker *bool
+
+ // The version ID of the delete marker created as a result of the DELETE
+ // operation. If you delete a specific object version, the value returned by this
+ // header is the version ID of the object version deleted.
+ //
+ // This functionality is not supported for directory buckets.
+ DeleteMarkerVersionId *string
+
+ // The name of the deleted object.
+ Key *string
+
+ // The version ID of the deleted object.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest *bool
+
+ // The object key.
+ Key *string
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time
+
+ // The account that created the delete marker.
+ Owner *Owner
+
+ // Version ID of an object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a
+// DeleteMarkerReplication element. If your Filter includes a Tag element, the
+// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does
+// not support replicating delete markers for tag-based rules. For an example
+// configuration, see [Basic Rule Configuration].
+//
+// For more information about delete marker replication, see [Basic Rule Configuration].
+//
+// If you are using an earlier version of the replication configuration, Amazon S3
+// handles replication of delete markers differently. For more information, see [Backward Compatibility].
+//
+// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html
+// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
+type DeleteMarkerReplication struct {
+
+	// Indicates whether to replicate delete markers.
+ Status DeleteMarkerReplicationStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC).
+type Destination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // the results.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specify this only in a cross-account scenario (where source and destination
+ // bucket owners are not the same), and you want to change replica ownership to the
+ // Amazon Web Services account that owns the destination bucket. If this is not
+	// specified in the replication configuration, the replicas are owned by the same
+ // Amazon Web Services account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation
+
+ // Destination bucket owner account ID. In a cross-account scenario, if you direct
+ // Amazon S3 to change replica ownership to the Amazon Web Services account that
+ // owns the destination bucket by specifying the AccessControlTranslation
+ // property, this is the account ID of the destination bucket owner. For more
+ // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide.
+ //
+ // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html
+ Account *string
+
+ // A container that provides information about encryption. If
+ // SourceSelectionCriteria is specified, you must specify this element.
+ EncryptionConfiguration *EncryptionConfiguration
+
+ // A container specifying replication metrics-related settings enabling
+ // replication metrics and events.
+ Metrics *Metrics
+
+ // A container specifying S3 Replication Time Control (S3 RTC), including whether
+ // S3 RTC is enabled and the time when all objects and operations on objects must
+ // be replicated. Must be specified together with a Metrics block.
+ ReplicationTime *ReplicationTime
+
+ // The storage class to use when replicating objects, such as S3 Standard or
+ // reduced redundancy. By default, Amazon S3 uses the storage class of the source
+ // object to create the object replica.
+ //
+ // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3
+ // API Reference.
+ //
+ // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html
+ StorageClass StorageClass
+
+ noSmithyDocumentSerde
+}
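+
+// A minimal replication-destination sketch: per the note above, an S3 RTC
+// block must be paired with a Metrics block (the ARN is illustrative):
+//
+//	dest := &types.Destination{
+//		Bucket: aws.String("arn:aws:s3:::replica-bucket"),
+//		ReplicationTime: &types.ReplicationTime{
+//			Status: types.ReplicationTimeStatusEnabled,
+//			Time:   &types.ReplicationTimeValue{Minutes: aws.Int32(15)},
+//		},
+//		Metrics: &types.Metrics{
+//			Status:         types.MetricsStatusEnabled,
+//			EventThreshold: &types.ReplicationTimeValue{Minutes: aws.Int32(15)},
+//		},
+//	}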
+
+// Contains the type of server-side encryption used.
+type Encryption struct {
+
+ // The server-side encryption algorithm used when storing job results in Amazon S3
+ // (for example, AES256, aws:kms ).
+ //
+ // This member is required.
+ EncryptionType ServerSideEncryption
+
+ // If the encryption type is aws:kms , this optional value can be used to specify
+ // the encryption context for the restore results.
+ KMSContext *string
+
+ // If the encryption type is aws:kms , this optional value specifies the ID of the
+ // symmetric encryption customer managed key to use for encryption of job results.
+ // Amazon S3 only supports symmetric encryption KMS keys. For more information, see
+ // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide.
+ //
+ // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ KMSKeyId *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies encryption-related information for an Amazon S3 bucket that is a
+// destination for replicated objects.
+//
+// If you're specifying a customer managed KMS key, we recommend using a fully
+// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the
+// key within the requester’s account. This behavior can result in data that's
+// encrypted with a KMS key that belongs to the requester, and not the bucket
+// owner.
+type EncryptionConfiguration struct {
+
+ // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ // the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ // Amazon S3 only supports symmetric encryption KMS keys. For more information, see
+ // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide.
+ //
+ // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ ReplicaKmsKeyID *string
+
+ noSmithyDocumentSerde
+}
+
+// A message that indicates the request is complete and no more messages will be
+// sent. You should not assume that the request is complete until the client
+// receives an EndEvent .
+type EndEvent struct {
+ noSmithyDocumentSerde
+}
+
+// Container for all error elements.
+type Error struct {
+
+ // The error code is a string that uniquely identifies an error condition. It is
+ // meant to be read and understood by programs that detect and handle errors by
+ // type. The following is a list of Amazon S3 error codes. For more information,
+ // see [Error responses].
+ //
+ // - Code: AccessDenied
+ //
+ // - Description: Access Denied
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: AccountProblem
+ //
+ // - Description: There is a problem with your Amazon Web Services account that
+ // prevents the action from completing successfully. Contact Amazon Web Services
+ // Support for further assistance.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: AllAccessDisabled
+ //
+ // - Description: All access to this Amazon S3 resource has been disabled.
+ // Contact Amazon Web Services Support for further assistance.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: AmbiguousGrantByEmailAddress
+ //
+ // - Description: The email address you provided is associated with more than
+ // one account.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: AuthorizationHeaderMalformed
+ //
+ // - Description: The authorization header you provided is invalid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: BadDigest
+ //
+ // - Description: The Content-MD5 you specified did not match what we received.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: BucketAlreadyExists
+ //
+ // - Description: The requested bucket name is not available. The bucket
+ // namespace is shared by all users of the system. Please select a different name
+ // and try again.
+ //
+ // - HTTP Status Code: 409 Conflict
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: BucketAlreadyOwnedByYou
+ //
+ // - Description: The bucket you tried to create already exists, and you own it.
+ // Amazon S3 returns this error in all Amazon Web Services Regions except in the
+ // North Virginia Region. For legacy compatibility, if you re-create an existing
+ // bucket that you already own in the North Virginia Region, Amazon S3 returns 200
+ // OK and resets the bucket access control lists (ACLs).
+ //
+	//   - HTTP Status Code: 409 Conflict (in all Regions except the North Virginia Region)
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: BucketNotEmpty
+ //
+ // - Description: The bucket you tried to delete is not empty.
+ //
+ // - HTTP Status Code: 409 Conflict
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: CredentialsNotSupported
+ //
+ // - Description: This request does not support credentials.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: CrossLocationLoggingProhibited
+ //
+ // - Description: Cross-location logging not allowed. Buckets in one geographic
+ // location cannot log information to a bucket in another location.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: EntityTooSmall
+ //
+ // - Description: Your proposed upload is smaller than the minimum allowed
+ // object size.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: EntityTooLarge
+ //
+ // - Description: Your proposed upload exceeds the maximum allowed object size.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: ExpiredToken
+ //
+ // - Description: The provided token has expired.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: IllegalVersioningConfigurationException
+ //
+ // - Description: Indicates that the versioning configuration specified in the
+ // request is invalid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: IncompleteBody
+ //
+ // - Description: You did not provide the number of bytes specified by the
+	//   Content-Length HTTP header.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: IncorrectNumberOfFilesInPostRequest
+ //
+ // - Description: POST requires exactly one file upload per request.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InlineDataTooLarge
+ //
+ // - Description: Inline data exceeds the maximum allowed size.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InternalError
+ //
+ // - Description: We encountered an internal error. Please try again.
+ //
+ // - HTTP Status Code: 500 Internal Server Error
+ //
+ // - SOAP Fault Code Prefix: Server
+ //
+ // - Code: InvalidAccessKeyId
+ //
+ // - Description: The Amazon Web Services access key ID you provided does not
+ // exist in our records.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidAddressingHeader
+ //
+ // - Description: You must specify the Anonymous role.
+ //
+ // - HTTP Status Code: N/A
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidArgument
+ //
+ // - Description: Invalid Argument
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidBucketName
+ //
+ // - Description: The specified bucket is not valid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidBucketState
+ //
+ // - Description: The request is not valid with the current state of the bucket.
+ //
+ // - HTTP Status Code: 409 Conflict
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidDigest
+ //
+ // - Description: The Content-MD5 you specified is not valid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidEncryptionAlgorithmError
+ //
+ // - Description: The encryption request you specified is not valid. The valid
+ // value is AES256.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidLocationConstraint
+ //
+ // - Description: The specified location constraint is not valid. For more
+ // information about Regions, see [How to Select a Region for Your Buckets].
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidObjectState
+ //
+ // - Description: The action is not valid for the current state of the object.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidPart
+ //
+ // - Description: One or more of the specified parts could not be found. The
+ // part might not have been uploaded, or the specified entity tag might not have
+ // matched the part's entity tag.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidPartOrder
+ //
+ // - Description: The list of parts was not in ascending order. Parts list must
+ // be specified in order by part number.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidPayer
+ //
+ // - Description: All access to this object has been disabled. Please contact
+ // Amazon Web Services Support for further assistance.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidPolicyDocument
+ //
+ // - Description: The content of the form does not meet the conditions specified
+ // in the policy document.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidRange
+ //
+ // - Description: The requested range cannot be satisfied.
+ //
+ // - HTTP Status Code: 416 Requested Range Not Satisfiable
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Please use AWS4-HMAC-SHA256 .
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: SOAP requests must be made over an HTTPS connection.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Acceleration is not supported for buckets
+ // with non-DNS compliant names.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Acceleration is not supported for buckets
+ // with periods (.) in their names.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual
+ // style requests.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Acceleration is not supported on this
+ // bucket. Contact Amazon Web Services Support for more information.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidRequest
+ //
+ // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this
+ // bucket. Contact Amazon Web Services Support for more information.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+	//   - SOAP Fault Code Prefix: N/A
+ //
+ // - Code: InvalidSecurity
+ //
+ // - Description: The provided security credentials are not valid.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidSOAPRequest
+ //
+ // - Description: The SOAP request body is invalid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidStorageClass
+ //
+ // - Description: The storage class you specified is not valid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidTargetBucketForLogging
+ //
+ // - Description: The target bucket for logging does not exist, is not owned by
+ // you, or does not have the appropriate grants for the log-delivery group.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidToken
+ //
+ // - Description: The provided token is malformed or otherwise invalid.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: InvalidURI
+ //
+ // - Description: Couldn't parse the specified URI.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: KeyTooLongError
+ //
+ // - Description: Your key is too long.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MalformedACLError
+ //
+ // - Description: The XML you provided was not well-formed or did not validate
+ // against our published schema.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MalformedPOSTRequest
+ //
+ // - Description: The body of your POST request is not well-formed
+ // multipart/form-data.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MalformedXML
+ //
+ // - Description: This happens when the user sends malformed XML (XML that
+ // doesn't conform to the published XSD) for the configuration. The error message
+ // is, "The XML you provided was not well-formed or did not validate against our
+ // published schema."
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MaxMessageLengthExceeded
+ //
+ // - Description: Your request was too big.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MaxPostPreDataLengthExceededError
+ //
+ // - Description: Your POST request fields preceding the upload file were too
+ // large.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MetadataTooLarge
+ //
+ // - Description: Your metadata headers exceed the maximum allowed metadata size.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MethodNotAllowed
+ //
+ // - Description: The specified method is not allowed against this resource.
+ //
+ // - HTTP Status Code: 405 Method Not Allowed
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MissingAttachment
+ //
+ // - Description: A SOAP attachment was expected, but none were found.
+ //
+ // - HTTP Status Code: N/A
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MissingContentLength
+ //
+ // - Description: You must provide the Content-Length HTTP header.
+ //
+ // - HTTP Status Code: 411 Length Required
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MissingRequestBodyError
+ //
+ // - Description: This happens when the user sends an empty XML document as a
+ // request. The error message is, "Request body is empty."
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MissingSecurityElement
+ //
+ // - Description: The SOAP 1.1 request is missing a security element.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: MissingSecurityHeader
+ //
+ // - Description: Your request is missing a required header.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoLoggingStatusForKey
+ //
+ // - Description: There is no such thing as a logging status subresource for a
+ // key.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoSuchBucket
+ //
+ // - Description: The specified bucket does not exist.
+ //
+ // - HTTP Status Code: 404 Not Found
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoSuchBucketPolicy
+ //
+ // - Description: The specified bucket does not have a bucket policy.
+ //
+ // - HTTP Status Code: 404 Not Found
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoSuchKey
+ //
+ // - Description: The specified key does not exist.
+ //
+ // - HTTP Status Code: 404 Not Found
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoSuchLifecycleConfiguration
+ //
+ // - Description: The lifecycle configuration does not exist.
+ //
+ // - HTTP Status Code: 404 Not Found
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoSuchUpload
+ //
+ // - Description: The specified multipart upload does not exist. The upload ID
+ // might be invalid, or the multipart upload might have been aborted or completed.
+ //
+ // - HTTP Status Code: 404 Not Found
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NoSuchVersion
+ //
+ // - Description: Indicates that the version ID specified in the request does
+ // not match an existing version.
+ //
+ // - HTTP Status Code: 404 Not Found
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: NotImplemented
+ //
+ // - Description: A header you provided implies functionality that is not
+ // implemented.
+ //
+ // - HTTP Status Code: 501 Not Implemented
+ //
+ // - SOAP Fault Code Prefix: Server
+ //
+ // - Code: NotSignedUp
+ //
+ // - Description: Your account is not signed up for the Amazon S3 service. You
+ // must sign up before you can use Amazon S3. You can sign up at the following URL:
+ // [Amazon S3]
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: OperationAborted
+ //
+ // - Description: A conflicting conditional action is currently in progress
+ // against this resource. Try again.
+ //
+ // - HTTP Status Code: 409 Conflict
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: PermanentRedirect
+ //
+ // - Description: The bucket you are attempting to access must be addressed
+ // using the specified endpoint. Send all future requests to this endpoint.
+ //
+ // - HTTP Status Code: 301 Moved Permanently
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: PreconditionFailed
+ //
+ // - Description: At least one of the preconditions you specified did not hold.
+ //
+ // - HTTP Status Code: 412 Precondition Failed
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: Redirect
+ //
+ // - Description: Temporary redirect.
+ //
+ // - HTTP Status Code: 307 Moved Temporarily
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: RestoreAlreadyInProgress
+ //
+ // - Description: Object restore is already in progress.
+ //
+ // - HTTP Status Code: 409 Conflict
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: RequestIsNotMultiPartContent
+ //
+ // - Description: Bucket POST must be of the enclosure-type multipart/form-data.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: RequestTimeout
+ //
+ // - Description: Your socket connection to the server was not read from or
+ // written to within the timeout period.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: RequestTimeTooSkewed
+ //
+ // - Description: The difference between the request time and the server's time
+ // is too large.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: RequestTorrentOfBucketError
+ //
+ // - Description: Requesting the torrent file of a bucket is not permitted.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: SignatureDoesNotMatch
+ //
+ // - Description: The request signature we calculated does not match the
+ // signature you provided. Check your Amazon Web Services secret access key and
+ // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details.
+ //
+ // - HTTP Status Code: 403 Forbidden
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: ServiceUnavailable
+ //
+ // - Description: Service is unable to handle request.
+ //
+ // - HTTP Status Code: 503 Service Unavailable
+ //
+ // - SOAP Fault Code Prefix: Server
+ //
+ // - Code: SlowDown
+ //
+ // - Description: Reduce your request rate.
+ //
+ // - HTTP Status Code: 503 Slow Down
+ //
+ // - SOAP Fault Code Prefix: Server
+ //
+ // - Code: TemporaryRedirect
+ //
+ // - Description: You are being redirected to the bucket while DNS updates.
+ //
+ // - HTTP Status Code: 307 Moved Temporarily
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: TokenRefreshRequired
+ //
+ // - Description: The provided token must be refreshed.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: TooManyBuckets
+ //
+ // - Description: You have attempted to create more buckets than allowed.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: UnexpectedContent
+ //
+ // - Description: This request does not support content.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: UnresolvableGrantByEmailAddress
+ //
+ // - Description: The email address you provided does not match any account on
+ // record.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // - Code: UserKeyMustBeSpecified
+ //
+ // - Description: The bucket POST must contain the specified field name. If it
+ // is specified, check the order of the fields.
+ //
+ // - HTTP Status Code: 400 Bad Request
+ //
+ // - SOAP Fault Code Prefix: Client
+ //
+ // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+ // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+ // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+ // [Amazon S3]: http://aws.amazon.com/s3
+ // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html
+ Code *string
+
+ // The error key.
+ Key *string
+
+ // The error message contains a generic description of the error condition in
+ // English. It is intended for a human audience. Simple programs display the
+ // message directly to the end user if they encounter an error condition they don't
+ // know how or don't care to handle. Sophisticated programs with more exhaustive
+ // error handling and proper internationalization are more likely to ignore the
+ // error message.
+ Message *string
+
+ // The version ID of the error.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// If the CreateBucketMetadataTableConfiguration request succeeds, but S3
+// Metadata was unable to create the table, this structure contains the error code
+// and error message.
+type ErrorDetails struct {
+
+ // If the CreateBucketMetadataTableConfiguration request succeeds, but S3
+ // Metadata was unable to create the table, this structure contains the error code.
+ // The possible error codes and error messages are as follows:
+ //
+ // - AccessDeniedCreatingResources - You don't have sufficient permissions to
+ // create the required resources. Make sure that you have
+ // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and
+ // s3tables:PutTablePolicy permissions, and then try again. To create a new
+ // metadata table, you must delete the metadata configuration for this bucket, and
+ // then create a new metadata configuration.
+ //
+ // - AccessDeniedWritingToTable - Unable to write to the metadata table because
+ // of missing resource permissions. To fix the resource policy, Amazon S3 needs to
+ // create a new metadata table. To create a new metadata table, you must delete the
+ // metadata configuration for this bucket, and then create a new metadata
+ // configuration.
+ //
+ // - DestinationTableNotFound - The destination table doesn't exist. To create a
+ // new metadata table, you must delete the metadata configuration for this bucket,
+ // and then create a new metadata configuration.
+ //
+ // - ServerInternalError - An internal error has occurred. To create a new
+ // metadata table, you must delete the metadata configuration for this bucket, and
+ // then create a new metadata configuration.
+ //
+ // - TableAlreadyExists - The table that you specified already exists in the
+ // table bucket's namespace. Specify a different table name. To create a new
+ // metadata table, you must delete the metadata configuration for this bucket, and
+ // then create a new metadata configuration.
+ //
+ // - TableBucketNotFound - The table bucket that you specified doesn't exist in
+ // this Amazon Web Services Region and account. Create or choose a different table
+ // bucket. To create a new metadata table, you must delete the metadata
+ // configuration for this bucket, and then create a new metadata configuration.
+ ErrorCode *string
+
+ // If the CreateBucketMetadataTableConfiguration request succeeds, but S3
+ // Metadata was unable to create the table, this structure contains the error
+ // message. The possible error codes and error messages are as follows:
+ //
+ // - AccessDeniedCreatingResources - You don't have sufficient permissions to
+ // create the required resources. Make sure that you have
+ // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and
+ // s3tables:PutTablePolicy permissions, and then try again. To create a new
+ // metadata table, you must delete the metadata configuration for this bucket, and
+ // then create a new metadata configuration.
+ //
+ // - AccessDeniedWritingToTable - Unable to write to the metadata table because
+ // of missing resource permissions. To fix the resource policy, Amazon S3 needs to
+ // create a new metadata table. To create a new metadata table, you must delete the
+ // metadata configuration for this bucket, and then create a new metadata
+ // configuration.
+ //
+ // - DestinationTableNotFound - The destination table doesn't exist. To create a
+ // new metadata table, you must delete the metadata configuration for this bucket,
+ // and then create a new metadata configuration.
+ //
+ // - ServerInternalError - An internal error has occurred. To create a new
+ // metadata table, you must delete the metadata configuration for this bucket, and
+ // then create a new metadata configuration.
+ //
+ // - TableAlreadyExists - The table that you specified already exists in the
+ // table bucket's namespace. Specify a different table name. To create a new
+ // metadata table, you must delete the metadata configuration for this bucket, and
+ // then create a new metadata configuration.
+ //
+ // - TableBucketNotFound - The table bucket that you specified doesn't exist in
+ // this Amazon Web Services Region and account. Create or choose a different table
+ // bucket. To create a new metadata table, you must delete the metadata
+ // configuration for this bucket, and then create a new metadata configuration.
+ ErrorMessage *string
+
+ noSmithyDocumentSerde
+}
+
+// The error information.
+type ErrorDocument struct {
+
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ //
+ // This member is required.
+ Key *string
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying the configuration for Amazon EventBridge.
+type EventBridgeConfiguration struct {
+ noSmithyDocumentSerde
+}
+
+// Optional configuration to replicate existing source bucket objects.
+//
+// This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in
+// the Amazon S3 User Guide.
+//
+// [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html
+type ExistingObjectReplication struct {
+
+ // Specifies whether Amazon S3 replicates existing source bucket objects.
+ //
+ // This member is required.
+ Status ExistingObjectReplicationStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the Amazon S3 object key name to filter on. An object key name is the
+// name assigned to an object in your Amazon S3 bucket. You specify whether to
+// filter on the suffix or prefix of the object key name. A prefix is a specific
+// string of characters at the beginning of an object key name, which you can use
+// to organize objects. For example, you can start the key names of related objects
+// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to
+// find objects in a bucket with key names that have the same prefix. A suffix is
+// similar to a prefix, but it is at the end of the object key name instead of at
+// the beginning.
+type FilterRule struct {
+
+ // The object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+ // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications] in the
+ // Amazon S3 User Guide.
+ //
+ // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ Name FilterRuleName
+
+ // The value that the filter searches for in object key names.
+ Value *string
+
+ noSmithyDocumentSerde
+}
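+
+// Editor's sketch, not generated SDK code: building the prefix-style FilterRule
+// described above, matching object keys that begin with "engineering/". Assumes
+// the FilterRuleNamePrefix enum value defined elsewhere in this package.
+func exampleFilterRulePrefix() FilterRule {
+ prefix := "engineering/"
+ return FilterRule{
+  Name:  FilterRuleNamePrefix,
+  Value: &prefix,
+ }
+}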
+
+// The metadata table configuration for a general purpose bucket.
+type GetBucketMetadataTableConfigurationResult struct {
+
+ // The metadata table configuration for a general purpose bucket.
+ //
+ // This member is required.
+ MetadataTableConfigurationResult *MetadataTableConfigurationResult
+
+ // The status of the metadata table. The status values are:
+ //
+ // - CREATING - The metadata table is in the process of being created in the
+ // specified table bucket.
+ //
+ // - ACTIVE - The metadata table has been created successfully and records are
+ // being delivered to the table.
+ //
+ // - FAILED - Amazon S3 is unable to create the metadata table, or Amazon S3 is
+ // unable to deliver records. See ErrorDetails for details.
+ //
+ // This member is required.
+ Status *string
+
+ // If the CreateBucketMetadataTableConfiguration request succeeds, but S3
+ // Metadata was unable to create the table, this structure contains the error code
+ // and error message.
+ Error *ErrorDetails
+
+ noSmithyDocumentSerde
+}
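+
+// Editor's sketch, not generated SDK code: one way a caller might surface the
+// FAILED status described above, preferring the ErrorDetails code when present.
+func exampleMetadataTableFailure(r *GetBucketMetadataTableConfigurationResult) (string, bool) {
+ if r == nil || r.Status == nil || *r.Status != "FAILED" {
+  return "", false
+ }
+ reason := "metadata table creation failed"
+ if r.Error != nil && r.Error.ErrorCode != nil {
+  reason = *r.Error.ErrorCode
+ }
+ return reason, true
+}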
+
+// A collection of parts associated with a multipart upload.
+type GetObjectAttributesParts struct {
+
+ // Indicates whether the returned list of parts is truncated. A value of true
+ // indicates that the list was truncated. A list can be truncated if the number of
+ // parts exceeds the limit returned in the MaxParts element.
+ IsTruncated *bool
+
+ // The maximum number of parts allowed in the response.
+ MaxParts *int32
+
+ // When a list is truncated, this element specifies the last part in the list, as
+ // well as the value to use for the PartNumberMarker request parameter in a
+ // subsequent request.
+ NextPartNumberMarker *string
+
+ // The marker for the current part.
+ PartNumberMarker *string
+
+ // A container for elements related to a particular part. A response can contain
+ // zero or more Parts elements.
+ //
+ // - General purpose buckets - For GetObjectAttributes , if an additional checksum
+ // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1
+ // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the
+ // request, the response doesn't return Part .
+ //
+ // - Directory buckets - For GetObjectAttributes , no matter whether an additional
+ // checksum is applied to the object specified in the request, the response returns
+ // Part .
+ Parts []ObjectPart
+
+ // The total number of parts.
+ TotalPartsCount *int32
+
+ noSmithyDocumentSerde
+}
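+
+// Editor's sketch, not generated SDK code: following the truncation contract
+// described above, the next page of parts is requested with the returned
+// NextPartNumberMarker value.
+func exampleNextPartsMarker(p *GetObjectAttributesParts) (*string, bool) {
+ if p == nil || p.IsTruncated == nil || !*p.IsTruncated {
+  return nil, false
+ }
+ return p.NextPartNumberMarker, true
+}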
+
+// Container for S3 Glacier job parameters.
+type GlacierJobParameters struct {
+
+ // Retrieval tier at which the restore will be processed.
+ //
+ // This member is required.
+ Tier Tier
+
+ noSmithyDocumentSerde
+}
+
+// Container for grant information.
+type Grant struct {
+
+ // The person being granted permissions.
+ Grantee *Grantee
+
+ // Specifies the permission given to the grantee.
+ Permission Permission
+
+ noSmithyDocumentSerde
+}
+
+// Container for the person being granted permissions.
+type Grantee struct {
+
+ // Type of grantee.
+ //
+ // This member is required.
+ Type Type
+
+ // Screen name of the grantee.
+ DisplayName *string
+
+ // Email address of the grantee.
+ //
+ // Using email addresses to specify a grantee is only supported in the following
+ // Amazon Web Services Regions:
+ //
+ // - US East (N. Virginia)
+ //
+ // - US West (N. California)
+ //
+ // - US West (Oregon)
+ //
+ // - Asia Pacific (Singapore)
+ //
+ // - Asia Pacific (Sydney)
+ //
+ // - Asia Pacific (Tokyo)
+ //
+ // - Europe (Ireland)
+ //
+ // - South America (São Paulo)
+ //
+ // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the
+ // Amazon Web Services General Reference.
+ //
+ // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
+ EmailAddress *string
+
+ // The canonical user ID of the grantee.
+ ID *string
+
+ // URI of the grantee group.
+ URI *string
+
+ noSmithyDocumentSerde
+}
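+
+// Editor's sketch, not generated SDK code: a canonical-user grant built from
+// the Grant and Grantee types above. The canonical ID is supplied by the
+// caller; TypeCanonicalUser and PermissionFullControl are enum values defined
+// elsewhere in this package.
+func exampleCanonicalUserGrant(canonicalID string) Grant {
+ return Grant{
+  Grantee: &Grantee{
+   Type: TypeCanonicalUser,
+   ID:   &canonicalID,
+  },
+  Permission: PermissionFullControl,
+ }
+}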
+
+// Container for the Suffix element.
+type IndexDocument struct {
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint. (For example, if the suffix is index.html and you make a request to
+ // samplebucket/images/ , the data that is returned will be for the object with the
+ // key name images/index.html .) The suffix must not be empty and must not include
+ // a slash character.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ //
+ // This member is required.
+ Suffix *string
+
+ noSmithyDocumentSerde
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+
+ // Name of the Principal.
+ //
+ // This functionality is not supported for directory buckets.
+ DisplayName *string
+
+ // If the principal is an Amazon Web Services account, it provides the Canonical
+ // User ID. If the principal is an IAM User, it provides a user ARN value.
+ //
+ // Directory buckets - If the principal is an Amazon Web Services account, it
+ // provides the Amazon Web Services account ID. If the principal is an IAM User, it
+ // provides a user ARN value.
+ ID *string
+
+ noSmithyDocumentSerde
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput
+
+ // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default
+ // Value: NONE.
+ CompressionType CompressionType
+
+ // Specifies JSON as object's input serialization format.
+ JSON *JSONInput
+
+ // Specifies Parquet as object's input serialization format.
+ Parquet *ParquetInput
+
+ noSmithyDocumentSerde
+}
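+
+// Editor's sketch, not generated SDK code: selecting from a GZIP-compressed
+// CSV object. Because CompressionType defaults to NONE (per the doc above),
+// GZIP must be set explicitly. CSVInput and the CompressionTypeGzip enum value
+// are assumed to be defined elsewhere in this package.
+func exampleGzipCSVInput() InputSerialization {
+ return InputSerialization{
+  CSV:             &CSVInput{},
+  CompressionType: CompressionTypeGzip,
+ }
+}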
+
+// A container for specifying S3 Intelligent-Tiering filters. The filters
+// determine the subset of objects to which the rule applies.
+type IntelligentTieringAndOperator struct {
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // configuration applies.
+ Prefix *string
+
+ // All of these tags must exist in the object's tag set in order for the
+ // configuration to apply.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket.
+//
+// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects].
+//
+// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+type IntelligentTieringConfiguration struct {
+
+ // The ID used to identify the S3 Intelligent-Tiering configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies the status of the configuration.
+ //
+ // This member is required.
+ Status IntelligentTieringStatus
+
+ // Specifies the S3 Intelligent-Tiering storage class tier of the configuration.
+ //
+ // This member is required.
+ Tierings []Tiering
+
+ // Specifies a bucket filter. The configuration only includes objects that meet
+ // the filter's criteria.
+ Filter *IntelligentTieringFilter
+
+ noSmithyDocumentSerde
+}
+
+// The Filter is used to identify objects that the S3 Intelligent-Tiering
+// configuration applies to.
+type IntelligentTieringFilter struct {
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating a
+ // metrics filter. The operator must have at least two predicates, and an object
+ // must match all of the predicates in order for the filter to apply.
+ And *IntelligentTieringAndOperator
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ Prefix *string
+
+ // A container of a key value name pair.
+ Tag *Tag
+
+ noSmithyDocumentSerde
+}
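+
+// Editor's sketch, not generated SDK code: an Intelligent-Tiering configuration
+// that moves objects under "logs/" to the archive tier after 90 days. The
+// Tiering struct and the IntelligentTieringStatusEnabled and
+// IntelligentTieringAccessTierArchiveAccess enum values are assumed to be
+// defined elsewhere in this package.
+func exampleIntelligentTieringConfiguration() IntelligentTieringConfiguration {
+ id := "archive-logs"
+ prefix := "logs/"
+ days := int32(90)
+ return IntelligentTieringConfiguration{
+  Id:     &id,
+  Status: IntelligentTieringStatusEnabled,
+  Filter: &IntelligentTieringFilter{Prefix: &prefix},
+  Tierings: []Tiering{
+   {Days: &days, AccessTier: IntelligentTieringAccessTierArchiveAccess},
+  },
+ }
+}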
+
+// Specifies the inventory configuration for an Amazon S3 bucket. For more
+// information, see [GET Bucket inventory] in the Amazon S3 API Reference.
+//
+// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html
+type InventoryConfiguration struct {
+
+ // Contains information about where to publish the inventory results.
+ //
+ // This member is required.
+ Destination *InventoryDestination
+
+ // The ID used to identify the inventory configuration.
+ //
+ // This member is required.
+ Id *string
+
+ // Object versions to include in the inventory list. If set to All , the list
+ // includes all the object versions, which adds the version-related fields
+ // VersionId , IsLatest , and DeleteMarker to the list. If set to Current , the
+ // list does not contain these version-related fields.
+ //
+ // This member is required.
+ IncludedObjectVersions InventoryIncludedObjectVersions
+
+ // Specifies whether the inventory is enabled or disabled. If set to True , an
+ // inventory list is generated. If set to False , no inventory list is generated.
+ //
+ // This member is required.
+ IsEnabled *bool
+
+ // Specifies the schedule for generating inventory results.
+ //
+ // This member is required.
+ Schedule *InventorySchedule
+
+ // Specifies an inventory filter. The inventory only includes objects that meet
+ // the filter's criteria.
+ Filter *InventoryFilter
+
+ // Contains the optional fields that are included in the inventory results.
+ OptionalFields []InventoryOptionalField
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the inventory configuration for an Amazon S3 bucket.
+type InventoryDestination struct {
+
+ // Contains the bucket name, file format, bucket owner (optional), and prefix
+ // (optional) where inventory results are published.
+ //
+ // This member is required.
+ S3BucketDestination *InventoryS3BucketDestination
+
+ noSmithyDocumentSerde
+}
+
+// Contains the type of server-side encryption used to encrypt the inventory
+// results.
+type InventoryEncryption struct {
+
+ // Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+ SSEKMS *SSEKMS
+
+ // Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+ SSES3 *SSES3
+
+ noSmithyDocumentSerde
+}
+
+// Specifies an inventory filter. The inventory only includes objects that meet
+// the filter's criteria.
+type InventoryFilter struct {
+
+ // The prefix that an object must have to be included in the inventory results.
+ //
+ // This member is required.
+ Prefix *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the bucket name, file format, bucket owner (optional), and prefix
+// (optional) where inventory results are published.
+type InventoryS3BucketDestination struct {
+
+ // The Amazon Resource Name (ARN) of the bucket where inventory results will be
+ // published.
+ //
+ // This member is required.
+ Bucket *string
+
+ // Specifies the output format of the inventory results.
+ //
+ // This member is required.
+ Format InventoryFormat
+
+ // The account ID that owns the destination S3 bucket. If no account ID is
+ // provided, the owner is not validated before exporting data.
+ //
+ // Although this value is optional, we strongly recommend that you set it to help
+ // prevent problems if the destination bucket ownership changes.
+ AccountId *string
+
+ // Contains the type of server-side encryption used to encrypt the inventory
+ // results.
+ Encryption *InventoryEncryption
+
+ // The prefix that is prepended to all inventory results.
+ Prefix *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the schedule for generating inventory results.
+type InventorySchedule struct {
+
+ // Specifies how frequently inventory results are produced.
+ //
+ // This member is required.
+ Frequency InventoryFrequency
+
+ noSmithyDocumentSerde
+}
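+
+// Editor's sketch, not generated SDK code: wiring the inventory types above
+// into a daily CSV inventory of all object versions. The destination bucket
+// ARN is a placeholder; the enum values are assumed to come from this package.
+func exampleInventoryConfiguration() InventoryConfiguration {
+ id := "daily-inventory"
+ enabled := true
+ bucketArn := "arn:aws:s3:::example-inventory-destination"
+ return InventoryConfiguration{
+  Id:                     &id,
+  IsEnabled:              &enabled,
+  IncludedObjectVersions: InventoryIncludedObjectVersionsAll,
+  Schedule:               &InventorySchedule{Frequency: InventoryFrequencyDaily},
+  Destination: &InventoryDestination{
+   S3BucketDestination: &InventoryS3BucketDestination{
+    Bucket: &bucketArn,
+    Format: InventoryFormatCsv,
+   },
+  },
+ }
+}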
+
+// Specifies JSON as object's input serialization format.
+type JSONInput struct {
+
+ // The type of JSON. Valid values: Document, Lines.
+ Type JSONType
+
+ noSmithyDocumentSerde
+}
+
+// Specifies JSON as request's output serialization format.
+type JSONOutput struct {
+
+ // The value used to separate individual records in the output. If no value is
+ // specified, Amazon S3 uses a newline character ('\n').
+ RecordDelimiter *string
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying the configuration for Lambda notifications.
+type LambdaFunctionConfiguration struct {
+
+ // The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ // information, see [Supported Event Types] in the Amazon S3 User Guide.
+ //
+ // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ //
+ // This member is required.
+ Events []Event
+
+ // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ // when the specified event type occurs.
+ //
+ // This member is required.
+ LambdaFunctionArn *string
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+ //
+ // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
+ Filter *NotificationConfigurationFilter
+
+ // An optional unique identifier for configurations in a notification
+ // configuration. If you don't provide one, Amazon S3 will assign an ID.
+ Id *string
+
+ noSmithyDocumentSerde
+}
+
+// Container for the expiration for the lifecycle of the object.
+//
+// For more information, see [Managing your storage lifecycle] in the Amazon S3 User Guide.
+//
+// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
+type LifecycleExpiration struct {
+
+ // Indicates at what date the object is to be moved or deleted. The date value
+ // must conform to the ISO 8601 format. The time is always midnight UTC.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ Date *time.Time
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int32
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+ // versions. If set to true, the delete marker will be expired; if set to false the
+ // policy takes no action. This cannot be specified with Days or Date in a
+ // Lifecycle Expiration Policy.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ ExpiredObjectDeleteMarker *bool
+
+ noSmithyDocumentSerde
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+//
+// For more information, see [Managing your storage lifecycle] in the Amazon S3 User Guide.
+//
+// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html
+type LifecycleRule struct {
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is
+ // not currently being applied.
+ //
+ // This member is required.
+ Status ExpirationStatus
+
+ // Specifies the days since the initiation of an incomplete multipart upload that
+ // Amazon S3 will wait before permanently removing all parts of the upload. For
+ // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration] in the Amazon S3 User Guide.
+ //
+ // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload
+
+ // Specifies the expiration for the lifecycle of the object in the form of date,
+ // days, and whether the object has a delete marker.
+ Expiration *LifecycleExpiration
+
+ // The Filter is used to identify objects that a Lifecycle Rule applies to. A
+ // Filter must have exactly one of Prefix , Tag , or And specified. Filter is
+ // required if the LifecycleRule does not contain a Prefix element.
+ //
+ // Tag filters are not supported for directory buckets.
+ Filter *LifecycleRuleFilter
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon S3
+ // permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended) to
+ // request that Amazon S3 delete noncurrent object versions at a specific period in
+ // the object's lifetime.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration
+
+ // Specifies the transition rule for the lifecycle rule that describes when
+ // noncurrent objects transition to a specific storage class. If your bucket is
+ // versioning-enabled (or versioning is suspended), you can set this action to
+ // request that Amazon S3 transition noncurrent object versions to a specific
+ // storage class at a set period in the object's lifetime.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ NoncurrentVersionTransitions []NoncurrentVersionTransition
+
+ // Prefix identifying one or more objects to which the rule applies. This is no
+ // longer used; use Filter instead.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ //
+ // Deprecated: This member has been deprecated.
+ Prefix *string
+
+ // Specifies when an Amazon S3 object transitions to a specified storage class.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ Transitions []Transition
+
+ noSmithyDocumentSerde
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more
+// predicates. The Lifecycle Rule will apply to any object matching all of the
+// predicates configured inside the And operator.
+type LifecycleRuleAndOperator struct {
+
+ // Minimum object size to which the rule applies.
+ ObjectSizeGreaterThan *int64
+
+ // Maximum object size to which the rule applies.
+ ObjectSizeLessThan *int64
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string
+
+ // All of these tags must exist in the object's tag set in order for the rule to
+ // apply.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to. A
+// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan ,
+// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the
+// Lifecycle Rule applies to all objects in the bucket.
+type LifecycleRuleFilter struct {
+
+ // This is used in a Lifecycle Rule Filter to apply a logical AND to two or more
+ // predicates. The Lifecycle Rule will apply to any object matching all of the
+ // predicates configured inside the And operator.
+ And *LifecycleRuleAndOperator
+
+ // Minimum object size to which the rule applies.
+ ObjectSizeGreaterThan *int64
+
+ // Maximum object size to which the rule applies.
+ ObjectSizeLessThan *int64
+
+ // Prefix identifying one or more objects to which the rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ Prefix *string
+
+ // This tag must exist in the object's tag set in order for the rule to apply.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ Tag *Tag
+
+ noSmithyDocumentSerde
+}
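+
+// Editor's sketch, not generated SDK code: a rule combining the lifecycle
+// types above, expiring objects under "tmp/" after 7 days and expiring
+// noncurrent versions after 30 days. ExpirationStatusEnabled is an enum value
+// assumed to be defined elsewhere in this package.
+func exampleLifecycleRule() LifecycleRule {
+ id := "expire-tmp"
+ prefix := "tmp/"
+ days := int32(7)
+ noncurrentDays := int32(30)
+ return LifecycleRule{
+  ID:         &id,
+  Status:     ExpirationStatusEnabled,
+  Filter:     &LifecycleRuleFilter{Prefix: &prefix},
+  Expiration: &LifecycleExpiration{Days: &days},
+  NoncurrentVersionExpiration: &NoncurrentVersionExpiration{
+   NoncurrentDays: &noncurrentDays,
+  },
+ }
+}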
+
+// Specifies the location where the bucket will be created.
+//
+// For directory buckets, the location type is Availability Zone or Local Zone.
+// For more information about directory buckets, see [Working with directory buckets] in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+//
+// [Working with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html
+type LocationInfo struct {
+
+ // The name of the location where the bucket will be created.
+ //
+ // For directory buckets, the name of the location is the Zone ID of the
+ // Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An
+ // example AZ ID value is usw2-az1 .
+ Name *string
+
+ // The type of location where the bucket will be created.
+ Type LocationType
+
+ noSmithyDocumentSerde
+}
+
+// Describes where logs are stored and the prefix that Amazon S3 assigns to all
+// log object keys for a bucket. For more information, see [PUT Bucket logging] in the Amazon S3 API
+// Reference.
+//
+// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html
+type LoggingEnabled struct {
+
+ // Specifies the bucket where you want Amazon S3 to store server access logs. You
+ // can have your logs delivered to any bucket that you own, including the same
+ // bucket that is being logged. You can also configure multiple buckets to deliver
+ // their logs to the same target bucket. In this case, you should choose a
+ // different TargetPrefix for each source bucket so that the delivered log files
+ // can be distinguished by key.
+ //
+ // This member is required.
+ TargetBucket *string
+
+ // A prefix for all log object keys. If you store log files from multiple Amazon
+ // S3 buckets in a single bucket, you can use a prefix to distinguish which log
+ // files came from which bucket.
+ //
+ // This member is required.
+ TargetPrefix *string
+
+ // Container for granting information.
+ //
+ // Buckets that use the bucket owner enforced setting for Object Ownership don't
+ // support target grants. For more information, see [Permissions for server access log delivery] in the Amazon S3 User Guide.
+ //
+ // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
+ TargetGrants []TargetGrant
+
+ // Amazon S3 key format for log objects.
+ TargetObjectKeyFormat *TargetObjectKeyFormat
+
+ noSmithyDocumentSerde
+}
+
+// A metadata key-value pair to store with an object.
+type MetadataEntry struct {
+
+ // Name of the metadata entry.
+ Name *string
+
+ // Value of the metadata entry.
+ Value *string
+
+ noSmithyDocumentSerde
+}
+
+// The metadata table configuration for a general purpose bucket.
+type MetadataTableConfiguration struct {
+
+ // The destination information for the metadata table configuration. The
+ // destination table bucket must be in the same Region and Amazon Web Services
+ // account as the general purpose bucket. The specified metadata table name must be
+ // unique within the aws_s3_metadata namespace in the destination table bucket.
+ //
+ // This member is required.
+ S3TablesDestination *S3TablesDestination
+
+ noSmithyDocumentSerde
+}
+
+// The metadata table configuration for a general purpose bucket. The destination
+// table bucket must be in the same Region and Amazon Web Services account as the
+// general purpose bucket. The specified metadata table name must be unique within
+// the aws_s3_metadata namespace in the destination table bucket.
+type MetadataTableConfigurationResult struct {
+
+ // The destination information for the metadata table configuration. The
+ // destination table bucket must be in the same Region and Amazon Web Services
+ // account as the general purpose bucket. The specified metadata table name must be
+ // unique within the aws_s3_metadata namespace in the destination table bucket.
+ //
+ // This member is required.
+ S3TablesDestinationResult *S3TablesDestinationResult
+
+ noSmithyDocumentSerde
+}
+
+// A container specifying replication metrics-related settings enabling
+// replication metrics and events.
+type Metrics struct {
+
+ // Specifies whether the replication metrics are enabled.
+ //
+ // This member is required.
+ Status MetricsStatus
+
+ // A container specifying the time threshold for emitting the
+ // s3:Replication:OperationMissedThreshold event.
+ EventThreshold *ReplicationTimeValue
+
+ noSmithyDocumentSerde
+}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates, and an object
+// must match all of the predicates in order for the filter to apply.
+type MetricsAndOperator struct {
+
+ // The access point ARN used when evaluating an AND predicate.
+ AccessPointArn *string
+
+ // The prefix used when evaluating an AND predicate.
+ Prefix *string
+
+ // The list of tags used when evaluating an AND predicate.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// Specifies a metrics configuration for the CloudWatch request metrics (specified
+// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an
+// existing metrics configuration, note that this is a full replacement of the
+// existing metrics configuration. If you don't include the elements you want to
+// keep, they are erased. For more information, see [PutBucketMetricsConfiguration].
+//
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html
+type MetricsConfiguration struct {
+
+ // The ID used to identify the metrics configuration. The ID has a 64 character
+ // limit and can only contain letters, numbers, periods, dashes, and underscores.
+ //
+ // This member is required.
+ Id *string
+
+ // Specifies a metrics configuration filter. The metrics configuration will only
+ // include objects that meet the filter's criteria. A filter must be a prefix, an
+ // object tag, an access point ARN, or a conjunction (MetricsAndOperator).
+ Filter MetricsFilter
+
+ noSmithyDocumentSerde
+}
+
+// Specifies a metrics configuration filter. The metrics configuration only
+// includes objects that meet the filter's criteria. A filter must be a prefix, an
+// object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more
+// information, see [PutBucketMetricsConfiguration].
+//
+// The following types satisfy this interface:
+//
+// MetricsFilterMemberAccessPointArn
+// MetricsFilterMemberAnd
+// MetricsFilterMemberPrefix
+// MetricsFilterMemberTag
+//
+// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html
+type MetricsFilter interface {
+ isMetricsFilter()
+}
+
+// The access point ARN used when evaluating a metrics filter.
+type MetricsFilterMemberAccessPointArn struct {
+ Value string
+
+ noSmithyDocumentSerde
+}
+
+func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {}
+
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates, and an object
+// must match all of the predicates in order for the filter to apply.
+type MetricsFilterMemberAnd struct {
+ Value MetricsAndOperator
+
+ noSmithyDocumentSerde
+}
+
+func (*MetricsFilterMemberAnd) isMetricsFilter() {}
+
+// The prefix used when evaluating a metrics filter.
+type MetricsFilterMemberPrefix struct {
+ Value string
+
+ noSmithyDocumentSerde
+}
+
+func (*MetricsFilterMemberPrefix) isMetricsFilter() {}
+
+// The tag used when evaluating a metrics filter.
+type MetricsFilterMemberTag struct {
+ Value Tag
+
+ noSmithyDocumentSerde
+}
+
+func (*MetricsFilterMemberTag) isMetricsFilter() {}
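+
+// Editor's sketch, not generated SDK code: consuming the MetricsFilter union
+// above with a type switch, the usual pattern for smithy-style Go unions.
+func exampleDescribeMetricsFilter(f MetricsFilter) string {
+ switch v := f.(type) {
+ case *MetricsFilterMemberPrefix:
+  return "prefix: " + v.Value
+ case *MetricsFilterMemberAccessPointArn:
+  return "access point: " + v.Value
+ case *MetricsFilterMemberTag:
+  return "tag filter"
+ case *MetricsFilterMemberAnd:
+  return "conjunction of predicates"
+ default:
+  return "unknown filter"
+ }
+}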
+
+// Container for the MultipartUpload for the Amazon S3 object.
+type MultipartUpload struct {
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm ChecksumAlgorithm
+
+ // The checksum type that is used to calculate the object’s checksum value. For
+ // more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType ChecksumType
+
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string
+
+ // Specifies the owner of the object that is part of the multipart upload.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for all
+ // the objects.
+ Owner *Owner
+
+ // The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
+ StorageClass StorageClass
+
+ // Upload ID that identifies the multipart upload.
+ UploadId *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon S3
+// permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended) to
+// request that Amazon S3 delete noncurrent object versions at a specific period in
+// the object's lifetime.
+//
+// This parameter applies to general purpose buckets only. It is not supported for
+// directory bucket lifecycle configurations.
+type NoncurrentVersionExpiration struct {
+
+ // Specifies how many noncurrent versions Amazon S3 will retain. You can specify
+ // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any
+ // additional noncurrent versions beyond the specified number to retain. For more
+ // information about noncurrent versions, see [Lifecycle configuration elements] in the Amazon S3 User Guide.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ //
+ // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html
+ NewerNoncurrentVersions *int32
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. The value must be a non-zero positive integer.
+ // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent] in the Amazon S3
+ // User Guide.
+ //
+ // This parameter applies to general purpose buckets only. It is not supported for
+ // directory bucket lifecycle configurations.
+ //
+ // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations
+ NoncurrentDays *int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the STANDARD_IA , ONEZONE_IA , INTELLIGENT_TIERING , GLACIER_IR ,
+// GLACIER , or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled
+// (or versioning is suspended), you can set this action to request that Amazon S3
+// transition noncurrent object versions to the STANDARD_IA , ONEZONE_IA ,
+// INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE storage class at a
+// specific period in the object's lifetime.
+type NoncurrentVersionTransition struct {
+
+ // Specifies how many noncurrent versions Amazon S3 will retain in the same
+ // storage class before transitioning objects. You can specify up to 100 noncurrent
+ // versions to retain. Amazon S3 will transition any additional noncurrent versions
+ // beyond the specified number to retain. For more information about noncurrent
+ // versions, see [Lifecycle configuration elements] in the Amazon S3 User Guide.
+ //
+ // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html
+ NewerNoncurrentVersions *int32
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent] in the Amazon S3 User Guide.
+ //
+ // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations
+ NoncurrentDays *int32
+
+ // The class of storage used to store the object.
+ StorageClass TransitionStorageClass
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off for the bucket.
+type NotificationConfiguration struct {
+
+ // Enables delivery of events to Amazon EventBridge.
+ EventBridgeConfiguration *EventBridgeConfiguration
+
+ // Describes the Lambda functions to invoke and the events for which to invoke
+ // them.
+ LambdaFunctionConfigurations []LambdaFunctionConfiguration
+
+ // The Amazon Simple Queue Service queues to publish messages to and the events
+ // for which to publish messages.
+ QueueConfigurations []QueueConfiguration
+
+ // The topic to which notifications are sent and the events for which
+ // notifications are generated.
+ TopicConfigurations []TopicConfiguration
+
+ noSmithyDocumentSerde
+}
+
+// Specifies object key name filtering rules. For information about key name
+// filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+//
+// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
+type NotificationConfigurationFilter struct {
+
+ // A container for object key name prefix and suffix filtering rules.
+ Key *S3KeyFilter
+
+ noSmithyDocumentSerde
+}
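+
+// Editor's sketch, not generated SDK code: a Lambda notification limited to
+// object-created events under "uploads/", using the filter types above. The
+// function ARN is a placeholder, and the event name is written as a raw Event
+// conversion; the package also defines Event enum values. S3KeyFilter is
+// assumed to be defined elsewhere in this package.
+func exampleLambdaNotification() NotificationConfiguration {
+ arn := "arn:aws:lambda:us-east-1:111122223333:function:example-handler"
+ prefix := "uploads/"
+ return NotificationConfiguration{
+  LambdaFunctionConfigurations: []LambdaFunctionConfiguration{{
+   LambdaFunctionArn: &arn,
+   Events:            []Event{Event("s3:ObjectCreated:*")},
+   Filter: &NotificationConfigurationFilter{
+    Key: &S3KeyFilter{
+     FilterRules: []FilterRule{{Name: FilterRuleNamePrefix, Value: &prefix}},
+    },
+   },
+  }},
+ }
+}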
+
+// An object consists of data and its descriptive metadata.
+type Object struct {
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm []ChecksumAlgorithm
+
+ // The checksum type that is used to calculate the object’s checksum value. For
+ // more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType ChecksumType
+
+ // The entity tag is a hash of the object. The ETag reflects changes only to the
+ // contents of an object, not its metadata. The ETag may or may not be an MD5
+ // digest of the object data. Whether or not it is depends on how the object was
+ // created and how it is encrypted as described below:
+ //
+ // - Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the Amazon Web Services Management Console, and are encrypted by SSE-S3
+ // or plaintext, have ETags that are an MD5 digest of their object data.
+ //
+ // - Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the Amazon Web Services Management Console, and are encrypted by SSE-C
+ // or SSE-KMS, have ETags that are not an MD5 digest of their object data.
+ //
+ // - If an object is created by either the Multipart Upload or Part Copy
+ // operation, the ETag is not an MD5 digest, regardless of the method of
+ // encryption. If an object is larger than 16 MB, the Amazon Web Services
+ // Management Console will upload or copy that object as a Multipart Upload, and
+ // therefore the ETag will not be an MD5 digest.
+ //
+ // Directory buckets - MD5 is not supported by directory buckets.
+ ETag *string
+
+ // The name that you assign to an object. You use the object key to retrieve the
+ // object.
+ Key *string
+
+ // Creation date of the object.
+ LastModified *time.Time
+
+ // The owner of the object.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner.
+ Owner *Owner
+
+ // Specifies the restoration status of an object. Objects in certain storage
+ // classes must be restored before they can be retrieved. For more information
+ // about these storage classes and how to work with archived objects, see [Working with archived objects] in the
+ // Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
+ //
+ // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html
+ RestoreStatus *RestoreStatus
+
+ // Size in bytes of the object.
+ Size *int64
+
+ // The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported by
+ // directory buckets to store objects.
+ StorageClass ObjectStorageClass
+
+ noSmithyDocumentSerde
+}
+
+// An Object Identifier is a unique value that identifies an object.
+type ObjectIdentifier struct {
+
+ // Key name of the object.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ //
+ // This member is required.
+ Key *string
+
+ // An entity tag (ETag) is an identifier assigned by a web server to a specific
+ // version of a resource found at a URL. This header field makes the request method
+ // conditional on ETags .
+ //
+ // Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings
+ // unique to the object.
+ ETag *string
+
+ // If present, the object is deleted only if its modification time matches the
+ // provided Timestamp .
+ //
+ // This functionality is only supported for directory buckets.
+ LastModifiedTime *time.Time
+
+ // If present, the object is deleted only if its size matches the provided size
+ // in bytes.
+ //
+ // This functionality is only supported for directory buckets.
+ Size *int64
+
+ // Version ID for the specific version of the object to delete.
+ //
+ // This functionality is not supported for directory buckets.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
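+
+// Editor's sketch, not generated SDK code: building the identifiers for a
+// batch delete from plain key strings, as consumed by the DeleteObjects API.
+// Indexing into the slice avoids aliasing a single loop variable's address.
+func exampleObjectIdentifiers(keys []string) []ObjectIdentifier {
+ ids := make([]ObjectIdentifier, 0, len(keys))
+ for i := range keys {
+  ids = append(ids, ObjectIdentifier{Key: &keys[i]})
+ }
+ return ids
+}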
+
+// The container element for Object Lock configuration parameters.
+type ObjectLockConfiguration struct {
+
+ // Indicates whether this bucket has an Object Lock configuration enabled. Enable
+ // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket.
+ ObjectLockEnabled ObjectLockEnabled
+
+ // Specifies the Object Lock rule for the specified object. Enable this rule
+ // when you apply ObjectLockConfiguration to a bucket. Bucket settings require
+ // both a mode and a period. The period can be either Days or Years but you must
+ // select one. You cannot specify Days and Years at the same time.
+ Rule *ObjectLockRule
+
+ noSmithyDocumentSerde
+}
+
+// A legal hold configuration for an object.
+type ObjectLockLegalHold struct {
+
+ // Indicates whether the specified object has a legal hold in place.
+ Status ObjectLockLegalHoldStatus
+
+ noSmithyDocumentSerde
+}
+
+// A Retention configuration for an object.
+type ObjectLockRetention struct {
+
+ // Indicates the Retention mode for the specified object.
+ Mode ObjectLockRetentionMode
+
+ // The date on which this Object Lock Retention will expire.
+ RetainUntilDate *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// The container element for an Object Lock rule.
+type ObjectLockRule struct {
+
+ // The default Object Lock retention mode and period that you want to apply to new
+ // objects placed in the specified bucket. Bucket settings require both a mode and
+ // a period. The period can be either Days or Years but you must select one. You
+ // cannot specify Days and Years at the same time.
+ DefaultRetention *DefaultRetention
+
+ noSmithyDocumentSerde
+}
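+
+// Editor's sketch, not generated SDK code: an Object Lock configuration with a
+// 30-day governance-mode default retention, honoring the Days-or-Years (but
+// not both) constraint documented above. DefaultRetention and the enum values
+// are assumed to be defined elsewhere in this package.
+func exampleObjectLockConfiguration() ObjectLockConfiguration {
+ days := int32(30)
+ return ObjectLockConfiguration{
+  ObjectLockEnabled: ObjectLockEnabledEnabled,
+  Rule: &ObjectLockRule{
+   DefaultRetention: &DefaultRetention{
+    Mode: ObjectLockRetentionModeGovernance,
+    Days: &days,
+   },
+  },
+ }
+}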
+
+// A container for elements related to an individual part.
+type ObjectPart struct {
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present
+ // if the multipart upload request was created with the CRC32 checksum algorithm.
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is
+ // present if the multipart upload request was created with the CRC32C checksum
+ // algorithm. For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is
+ // present if the multipart upload request was created with the CRC64NVME checksum
+ // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added
+ // the default checksum, CRC64NVME , to the uploaded object). For more information,
+ // see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present
+ // if the multipart upload request was created with the SHA1 checksum algorithm.
+ // For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is
+ // present if the multipart upload request was created with the SHA256 checksum
+ // algorithm. For more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // The part number identifying the part. This value is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int32
+
+ // The size of the uploaded part in bytes.
+ Size *int64
+
+ noSmithyDocumentSerde
+}
+
+// The version of an object.
+type ObjectVersion struct {
+
+ // The algorithm that was used to create a checksum of the object.
+ ChecksumAlgorithm []ChecksumAlgorithm
+
+ // The checksum type that is used to calculate the object’s checksum value. For
+ // more information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumType ChecksumType
+
+ // The entity tag is an MD5 hash of that version of the object.
+ ETag *string
+
+ // Specifies whether the object is (true) or is not (false) the latest version of
+ // an object.
+ IsLatest *bool
+
+ // The object key.
+ Key *string
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time
+
+ // Specifies the owner of the object.
+ Owner *Owner
+
+ // Specifies the restoration status of an object. Objects in certain storage
+ // classes must be restored before they can be retrieved. For more information
+ // about these storage classes and how to work with archived objects, see [Working with archived objects] in the
+ // Amazon S3 User Guide.
+ //
+ // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html
+ RestoreStatus *RestoreStatus
+
+ // Size in bytes of the object.
+ Size *int64
+
+ // The class of storage used to store the object.
+ StorageClass ObjectVersionStorageClass
+
+ // Version ID of an object.
+ VersionId *string
+
+ noSmithyDocumentSerde
+}
+
+// Describes the location where the restore job's output is stored.
+type OutputLocation struct {
+
+ // Describes an S3 location that will receive the results of the restore request.
+ S3 *S3Location
+
+ noSmithyDocumentSerde
+}
+
+// Describes how results of the Select job are serialized.
+type OutputSerialization struct {
+
+ // Describes the serialization of CSV-encoded Select results.
+ CSV *CSVOutput
+
+ // Specifies JSON as request's output serialization format.
+ JSON *JSONOutput
+
+ noSmithyDocumentSerde
+}
+
+// Container for the owner's display name and ID.
+type Owner struct {
+
+ // Container for the display name of the owner. This value is only supported in
+ // the following Amazon Web Services Regions:
+ //
+ // - US East (N. Virginia)
+ //
+ // - US West (N. California)
+ //
+ // - US West (Oregon)
+ //
+ // - Asia Pacific (Singapore)
+ //
+ // - Asia Pacific (Sydney)
+ //
+ // - Asia Pacific (Tokyo)
+ //
+ // - Europe (Ireland)
+ //
+ // - South America (São Paulo)
+ //
+ // This functionality is not supported for directory buckets.
+ DisplayName *string
+
+ // Container for the ID of the owner.
+ ID *string
+
+ noSmithyDocumentSerde
+}
+
+// The container element for a bucket's ownership controls.
+type OwnershipControls struct {
+
+ // The container element for an ownership control rule.
+ //
+ // This member is required.
+ Rules []OwnershipControlsRule
+
+ noSmithyDocumentSerde
+}
+
+// The container element for an ownership control rule.
+type OwnershipControlsRule struct {
+
+ // The container element for object ownership for a bucket's ownership controls.
+ //
+ // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
+ // bucket owner if the objects are uploaded with the bucket-owner-full-control
+ // canned ACL.
+ //
+ // ObjectWriter - The uploading account will own the object if the object is
+ // uploaded with the bucket-owner-full-control canned ACL.
+ //
+ // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
+ // affect permissions. The bucket owner automatically owns and has full control
+ // over every object in the bucket. The bucket only accepts PUT requests that don't
+ // specify an ACL or specify bucket owner full control ACLs (such as the predefined
+ // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants
+ // the same permissions).
+ //
+ // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are
+ // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where
+ // you must control access for each object individually. For more information about
+ // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket] in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Directory buckets
+ // use the bucket owner enforced setting for S3 Object Ownership.
+ //
+ // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
+ //
+ // This member is required.
+ ObjectOwnership ObjectOwnership
+
+ noSmithyDocumentSerde
+}
+
+// Container for Parquet.
+type ParquetInput struct {
+ noSmithyDocumentSerde
+}
+
+// Container for elements related to a part.
+type Part struct {
+
+ // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present
+ // if the object was uploaded with the CRC32 checksum algorithm. For more
+ // information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32 *string
+
+ // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is
+ // present if the object was uploaded with the CRC32C checksum algorithm. For more
+ // information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC32C *string
+
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is
+ // present if the multipart upload request was created with the CRC64NVME checksum
+ // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added
+ // the default checksum, CRC64NVME , to the uploaded object). For more information,
+ // see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present
+ // if the object was uploaded with the SHA1 checksum algorithm. For more
+ // information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA1 *string
+
+ // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is
+ // present if the object was uploaded with the SHA256 checksum algorithm. For more
+ // information, see [Checking object integrity] in the Amazon S3 User Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumSHA256 *string
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time
+
+ // Part number identifying the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber *int32
+
+ // Size in bytes of the uploaded part data.
+ Size *int64
+
+ noSmithyDocumentSerde
+}
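+
+// Illustrative sketch, not part of the generated code: the checksum fields
+// above are Base64 strings, so client code can recover the raw checksum bytes
+// with the standard library. aws.ToString is the nil-safe dereference helper
+// from github.com/aws/aws-sdk-go-v2/aws.
+//
+//	// Raw 4-byte CRC32 of the part; empty if the part was uploaded
+//	// without that checksum algorithm.
+//	sum, err := base64.StdEncoding.DecodeString(aws.ToString(part.ChecksumCRC32))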
+
+// Amazon S3 keys for log objects are partitioned in the following format:
+//
+// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+//
+// PartitionedPrefix defaults to EventTime delivery when server access logs are
+// delivered.
+type PartitionedPrefix struct {
+
+ // Specifies the partition date source for the partitioned prefix.
+ // PartitionDateSource can be EventTime or DeliveryTime .
+ //
+ // For DeliveryTime , the time in the log file names corresponds to the delivery
+ // time for the log files.
+ //
+ // For EventTime , the logs delivered are for a specific day only. The year,
+ // month, and day correspond to the day on which the event occurred, and the
+ // hour, minutes, and seconds are set to 00 in the key.
+ PartitionDateSource PartitionDateSource
+
+ noSmithyDocumentSerde
+}
+
+// The container element for a bucket's policy status.
+type PolicyStatus struct {
+
+ // The policy status for this bucket. TRUE indicates that this bucket is public.
+ // FALSE indicates that the bucket is not public.
+ IsPublic *bool
+
+ noSmithyDocumentSerde
+}
+
+// This data type contains information about progress of an operation.
+type Progress struct {
+
+ // The current number of uncompressed object bytes processed.
+ BytesProcessed *int64
+
+ // The current number of bytes of records payload data returned.
+ BytesReturned *int64
+
+ // The current number of object bytes scanned.
+ BytesScanned *int64
+
+ noSmithyDocumentSerde
+}
+
+// This data type contains information about the progress event of an operation.
+type ProgressEvent struct {
+
+ // The Progress event details.
+ Details *Progress
+
+ noSmithyDocumentSerde
+}
+
+// The PublicAccessBlock configuration that you want to apply to this Amazon S3
+// bucket. You can enable the configuration options in any combination. For more
+// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"] in
+// the Amazon S3 User Guide.
+//
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
+type PublicAccessBlockConfiguration struct {
+
+ // Specifies whether Amazon S3 should block public access control lists (ACLs) for
+ // this bucket and objects in this bucket. Setting this element to TRUE causes the
+ // following behavior:
+ //
+ // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.
+ //
+ // - PUT Object calls fail if the request includes a public ACL.
+ //
+ // - PUT Bucket calls fail if the request includes a public ACL.
+ //
+ // Enabling this setting doesn't affect existing policies or ACLs.
+ BlockPublicAcls *bool
+
+ // Specifies whether Amazon S3 should block public bucket policies for this
+ // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT
+ // Bucket policy if the specified bucket policy allows public access.
+ //
+ // Enabling this setting doesn't affect existing bucket policies.
+ BlockPublicPolicy *bool
+
+ // Specifies whether Amazon S3 should ignore public ACLs for this bucket and
+ // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore
+ // all public ACLs on this bucket and objects in this bucket.
+ //
+ // Enabling this setting doesn't affect the persistence of any existing ACLs and
+ // doesn't prevent new public ACLs from being set.
+ IgnorePublicAcls *bool
+
+ // Specifies whether Amazon S3 should restrict public bucket policies for this
+ // bucket. Setting this element to TRUE restricts access to this bucket to only
+ // Amazon Web Services service principals and authorized users within this account
+ // if the bucket has a public policy.
+ //
+ // Enabling this setting doesn't affect previously stored bucket policies, except
+ // that public and cross-account access within any public bucket policy, including
+ // non-public delegation to specific accounts, is blocked.
+ RestrictPublicBuckets *bool
+
+ noSmithyDocumentSerde
+}
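+
+// Illustrative sketch, not part of the generated code, written from client
+// code with this package imported as types: setting all four options blocks
+// every form of public access. aws.Bool is the pointer helper from
+// github.com/aws/aws-sdk-go-v2/aws.
+//
+//	blockAll := types.PublicAccessBlockConfiguration{
+//		BlockPublicAcls:       aws.Bool(true),
+//		BlockPublicPolicy:     aws.Bool(true),
+//		IgnorePublicAcls:      aws.Bool(true),
+//		RestrictPublicBuckets: aws.Bool(true),
+//	}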
+
+// Specifies the configuration for publishing messages to an Amazon Simple Queue
+// Service (Amazon SQS) queue when Amazon S3 detects specified events.
+type QueueConfiguration struct {
+
+ // A collection of bucket events for which to send notifications.
+ //
+ // This member is required.
+ Events []Event
+
+ // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
+ // publishes a message when it detects events of the specified type.
+ //
+ // This member is required.
+ QueueArn *string
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+ //
+ // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
+ Filter *NotificationConfigurationFilter
+
+ // An optional unique identifier for configurations in a notification
+ // configuration. If you don't provide one, Amazon S3 will assign an ID.
+ Id *string
+
+ noSmithyDocumentSerde
+}
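+
+// Illustrative sketch, not part of the generated code: a configuration that
+// sends all object-created events for keys under logs/ to an SQS queue. The
+// queue ARN is a placeholder; the FilterRuleName constants come from this
+// package's enums.
+//
+//	qc := types.QueueConfiguration{
+//		QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:my-queue"),
+//		Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
+//		Filter: &types.NotificationConfigurationFilter{
+//			Key: &types.S3KeyFilter{
+//				FilterRules: []types.FilterRule{
+//					{Name: types.FilterRuleNamePrefix, Value: aws.String("logs/")},
+//				},
+//			},
+//		},
+//	}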
+
+// The container for the records event.
+type RecordsEvent struct {
+
+ // The byte array of partial, one or more result records. S3 Select doesn't
+ // guarantee that a record will be self-contained in one record frame. To ensure
+ // continuous streaming of data, S3 Select might split the same record across
+ // multiple record frames instead of aggregating the results in memory. Some S3
+ // clients (for example, the SDK for Java) handle this behavior by creating a
+ // ByteStream out of the response by default. Other clients might not handle this
+ // behavior by default. In those cases, you must aggregate the results on the
+ // client side and parse the response.
+ Payload []byte
+
+ noSmithyDocumentSerde
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+
+ // The host name to use in the redirect request.
+ HostName *string
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string
+
+ // Protocol to use when redirecting requests. The default is the protocol that is
+ // used in the original request.
+ Protocol Protocol
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/ , you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one
+ // of the siblings is present. Can be present only if ReplaceKeyWith is not
+ // provided.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ ReplaceKeyPrefixWith *string
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // request to error.html . Not required if one of the siblings is present. Can be
+ // present only if ReplaceKeyPrefixWith is not provided.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ ReplaceKeyWith *string
+
+ noSmithyDocumentSerde
+}
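+
+// Illustrative sketch, not part of the generated code: the docs/ to documents/
+// example above expressed as a routing rule (Condition and RoutingRule are
+// defined elsewhere in this package).
+//
+//	rule := types.RoutingRule{
+//		Condition: &types.Condition{KeyPrefixEquals: aws.String("docs/")},
+//		Redirect:  &types.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
+//	}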
+
+// Specifies the redirect behavior of all requests to a website endpoint of an
+// Amazon S3 bucket.
+type RedirectAllRequestsTo struct {
+
+ // Name of the host where requests are redirected.
+ //
+ // This member is required.
+ HostName *string
+
+ // Protocol to use when redirecting requests. The default is the protocol that is
+ // used in the original request.
+ Protocol Protocol
+
+ noSmithyDocumentSerde
+}
+
+// A filter that you can specify for selecting modifications on replicas.
+// Amazon S3 doesn't replicate replica modifications by default. In the latest
+// version of replication configuration (when Filter is specified), you can
+// specify this element and set the status to Enabled to replicate modifications
+// on replicas.
+//
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, this element
+// is not allowed.
+type ReplicaModifications struct {
+
+ // Specifies whether Amazon S3 replicates modifications on replicas.
+ //
+ // This member is required.
+ Status ReplicaModificationsStatus
+
+ noSmithyDocumentSerde
+}
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+
+ // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role
+ // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication] in
+ // the Amazon S3 User Guide.
+ //
+ // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html
+ //
+ // This member is required.
+ Role *string
+
+ // A container for one or more replication rules. A replication configuration must
+ // have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // This member is required.
+ Rules []ReplicationRule
+
+ noSmithyDocumentSerde
+}
+
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
+type ReplicationRule struct {
+
+ // A container for information about the replication destination and its
+ // configurations, including enabling S3 Replication Time Control (S3 RTC).
+ //
+ // This member is required.
+ Destination *Destination
+
+ // Specifies whether the rule is enabled.
+ //
+ // This member is required.
+ Status ReplicationRuleStatus
+
+ // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+ // in your replication configuration, you must also include a
+ // DeleteMarkerReplication element. If your Filter includes a Tag element, the
+ // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does
+ // not support replicating delete markers for tag-based rules. For an example
+ // configuration, see [Basic Rule Configuration].
+ //
+ // For more information about delete marker replication, see [Basic Rule Configuration].
+ //
+ // If you are using an earlier version of the replication configuration, Amazon S3
+ // handles replication of delete markers differently. For more information, see [Backward Compatibility].
+ //
+ // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html
+ // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations
+ DeleteMarkerReplication *DeleteMarkerReplication
+
+ // Optional configuration to replicate existing source bucket objects.
+ //
+ // This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in
+ // the Amazon S3 User Guide.
+ //
+ // [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html
+ ExistingObjectReplication *ExistingObjectReplication
+
+ // A filter that identifies the subset of objects to which the replication rule
+ // applies. A Filter must specify exactly one Prefix , Tag , or an And child
+ // element.
+ Filter *ReplicationRuleFilter
+
+ // A unique identifier for the rule. The maximum value is 255 characters.
+ ID *string
+
+ // An object key name prefix that identifies the object or objects to which the
+ // rule applies. The maximum prefix length is 1,024 characters. To include all
+ // objects in a bucket, specify an empty string.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ //
+ // Deprecated: This member has been deprecated.
+ Prefix *string
+
+ // The priority indicates which rule has precedence whenever two or more
+ // replication rules conflict. Amazon S3 will attempt to replicate objects
+ // according to all replication rules. However, if there are two or more rules with
+ // the same destination bucket, then objects will be replicated according to the
+ // rule with the highest priority. The higher the number, the higher the priority.
+ //
+ // For more information, see [Replication] in the Amazon S3 User Guide.
+ //
+ // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html
+ Priority *int32
+
+ // A container that describes additional filters for identifying the source
+ // objects that you want to replicate. You can choose to enable or disable the
+ // replication of these objects. Currently, Amazon S3 supports only the filter that
+ // you can specify for objects created with server-side encryption using a customer
+ // managed key stored in Amazon Web Services Key Management Service (SSE-KMS).
+ SourceSelectionCriteria *SourceSelectionCriteria
+
+ noSmithyDocumentSerde
+}
+
+// A container for specifying rule filters. The filters determine the subset of
+// objects to which the rule applies. This element is required only if you specify
+// more than one filter.
+//
+// For example:
+//
+// - If you specify both a Prefix and a Tag filter, wrap these filters in an And
+// tag.
+//
+// - If you specify a filter based on multiple tags, wrap the Tag elements in an
+// And tag.
+type ReplicationRuleAndOperator struct {
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // rule applies.
+ Prefix *string
+
+ // An array of tags containing key and value pairs.
+ Tags []Tag
+
+ noSmithyDocumentSerde
+}
+
+// A filter that identifies the subset of objects to which the replication rule
+// applies. A Filter must specify exactly one Prefix , Tag , or an And child
+// element.
+type ReplicationRuleFilter struct {
+
+ // A container for specifying rule filters. The filters determine the subset of
+ // objects to which the rule applies. This element is required only if you specify
+ // more than one filter. For example:
+ //
+ // - If you specify both a Prefix and a Tag filter, wrap these filters in an And
+ // tag.
+ //
+ // - If you specify a filter based on multiple tags, wrap the Tag elements in an
+ // And tag.
+ And *ReplicationRuleAndOperator
+
+ // An object key name prefix that identifies the subset of objects to which the
+ // rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such as
+ // carriage returns) when using XML requests. For more information, see [XML related object key constraints].
+ //
+ // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints
+ Prefix *string
+
+ // A container for specifying a tag key and value.
+ //
+ // The rule applies only to objects that have the tag in their tag set.
+ Tag *Tag
+
+ noSmithyDocumentSerde
+}
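+
+// Illustrative sketch, not part of the generated code: a filter that combines
+// a prefix with a tag must wrap both conditions in the And element rather than
+// set Prefix and Tag directly.
+//
+//	filter := &types.ReplicationRuleFilter{
+//		And: &types.ReplicationRuleAndOperator{
+//			Prefix: aws.String("photos/"),
+//			Tags:   []types.Tag{{Key: aws.String("replicate"), Value: aws.String("yes")}},
+//		},
+//	}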
+
+// A container specifying S3 Replication Time Control (S3 RTC) related
+// information, including whether S3 RTC is enabled and the time when all objects
+// and operations on objects must be replicated. Must be specified together with a
+// Metrics block.
+type ReplicationTime struct {
+
+ // Specifies whether the replication time is enabled.
+ //
+ // This member is required.
+ Status ReplicationTimeStatus
+
+ // A container specifying the time by which replication should be complete for
+ // all objects and operations on objects.
+ //
+ // This member is required.
+ Time *ReplicationTimeValue
+
+ noSmithyDocumentSerde
+}
+
+// A container specifying the time value for S3 Replication Time Control (S3 RTC)
+// and replication metrics EventThreshold .
+type ReplicationTimeValue struct {
+
+ // Contains an integer specifying time in minutes.
+ //
+ // Valid value: 15
+ Minutes *int32
+
+ noSmithyDocumentSerde
+}
+
+// Container for Payer.
+type RequestPaymentConfiguration struct {
+
+ // Specifies who pays for the download and request fees.
+ //
+ // This member is required.
+ Payer Payer
+
+ noSmithyDocumentSerde
+}
+
+// Container for specifying if periodic QueryProgress messages should be sent.
+type RequestProgress struct {
+
+ // Specifies whether periodic QueryProgress frames should be sent. Valid values:
+ // TRUE, FALSE. Default value: FALSE.
+ Enabled *bool
+
+ noSmithyDocumentSerde
+}
+
+// Container for restore job parameters.
+type RestoreRequest struct {
+
+ // Lifetime of the active copy in days. Do not use with restores that specify
+ // OutputLocation .
+ //
+ // The Days element is required for regular restores, and must not be provided for
+ // select requests.
+ Days *int32
+
+ // The optional description for the job.
+ Description *string
+
+ // S3 Glacier related parameters pertaining to this job. Do not use with restores
+ // that specify OutputLocation .
+ GlacierJobParameters *GlacierJobParameters
+
+ // Describes the location where the restore job's output is stored.
+ OutputLocation *OutputLocation
+
+ // Amazon S3 Select is no longer available to new customers. Existing customers of
+ // Amazon S3 Select can continue to use the feature as usual. [Learn more]
+ //
+ // Describes the parameters for Select job types.
+ //
+ // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/
+ SelectParameters *SelectParameters
+
+ // Retrieval tier at which the restore will be processed.
+ Tier Tier
+
+ // Amazon S3 Select is no longer available to new customers. Existing customers of
+ // Amazon S3 Select can continue to use the feature as usual. [Learn more]
+ //
+ // Type of restore request.
+ //
+ // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/
+ Type RestoreRequestType
+
+ noSmithyDocumentSerde
+}
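+
+// Illustrative sketch, not part of the generated code: a plain archive restore
+// that keeps the temporary copy for ten days and uses the Standard retrieval
+// tier (GlacierJobParameters is defined elsewhere in this package).
+//
+//	req := types.RestoreRequest{
+//		Days:                 aws.Int32(10),
+//		GlacierJobParameters: &types.GlacierJobParameters{Tier: types.TierStandard},
+//	}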
+
+// Specifies the restoration status of an object. Objects in certain storage
+// classes must be restored before they can be retrieved. For more information
+// about these storage classes and how to work with archived objects, see [Working with archived objects] in the
+// Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets. Only the S3 Express
+// One Zone storage class is supported by directory buckets to store objects.
+//
+// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html
+type RestoreStatus struct {
+
+ // Specifies whether the object is currently being restored. If the object
+ // restoration is in progress, the header returns the value TRUE . For example:
+ //
+ // x-amz-optional-object-attributes: IsRestoreInProgress="true"
+ //
+ // If the object restoration has completed, the header returns the value FALSE .
+ // For example:
+ //
+ // x-amz-optional-object-attributes: IsRestoreInProgress="false",
+ // RestoreExpiryDate="2012-12-21T00:00:00.000Z"
+ //
+ // If the object hasn't been restored, there is no header response.
+ IsRestoreInProgress *bool
+
+ // Indicates when the restored copy will expire. This value is populated only if
+ // the object has already been restored. For example:
+ //
+ // x-amz-optional-object-attributes: IsRestoreInProgress="false",
+ // RestoreExpiryDate="2012-12-21T00:00:00.000Z"
+ RestoreExpiryDate *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the redirect behavior and when a redirect is applied. For more
+// information about routing rules, see [Configuring advanced conditional redirects] in the Amazon S3 User Guide.
+//
+// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects
+type RoutingRule struct {
+
+ // Container for redirect information. You can redirect requests to another host,
+ // to another page, or with another protocol. In the event of an error, you can
+ // specify a different error code to return.
+ //
+ // This member is required.
+ Redirect *Redirect
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example: 1. If the request is for pages in the /docs
+ // folder, redirect to the /documents folder. 2. If the request results in an
+ // HTTP 4xx error, redirect the request to another host where you might process
+ // the error.
+ Condition *Condition
+
+ noSmithyDocumentSerde
+}
+
+// A container for object key name prefix and suffix filtering rules.
+type S3KeyFilter struct {
+
+ // A list of containers for the key-value pair that defines the criteria for the
+ // filter rule.
+ FilterRules []FilterRule
+
+ noSmithyDocumentSerde
+}
+
+// Describes an Amazon S3 location that will receive the results of the restore
+// request.
+type S3Location struct {
+
+ // The name of the bucket where the restore results will be placed.
+ //
+ // This member is required.
+ BucketName *string
+
+ // The prefix that is prepended to the restore results for this request.
+ //
+ // This member is required.
+ Prefix *string
+
+ // A list of grants that control access to the staged results.
+ AccessControlList []Grant
+
+ // The canned ACL to apply to the restore results.
+ CannedACL ObjectCannedACL
+
+ // Contains the type of server-side encryption used.
+ Encryption *Encryption
+
+ // The class of storage used to store the restore results.
+ StorageClass StorageClass
+
+ // The tag-set that is applied to the restore results.
+ Tagging *Tagging
+
+ // A list of metadata to store with the restore results in S3.
+ UserMetadata []MetadataEntry
+
+ noSmithyDocumentSerde
+}
+
+// The destination information for the metadata table configuration. The
+// destination table bucket must be in the same Region and Amazon Web Services
+// account as the general purpose bucket. The specified metadata table name must be
+// unique within the aws_s3_metadata namespace in the destination table bucket.
+type S3TablesDestination struct {
+
+ // The Amazon Resource Name (ARN) for the table bucket that's specified as the
+ // destination in the metadata table configuration. The destination table bucket
+ // must be in the same Region and Amazon Web Services account as the general
+ // purpose bucket.
+ //
+ // This member is required.
+ TableBucketArn *string
+
+ // The name for the metadata table in your metadata table configuration. The
+ // specified metadata table name must be unique within the aws_s3_metadata
+ // namespace in the destination table bucket.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+// The destination information for the metadata table configuration. The
+// destination table bucket must be in the same Region and Amazon Web Services
+// account as the general purpose bucket. The specified metadata table name must be
+// unique within the aws_s3_metadata namespace in the destination table bucket.
+type S3TablesDestinationResult struct {
+
+ // The Amazon Resource Name (ARN) for the metadata table in the metadata table
+ // configuration. The specified metadata table name must be unique within the
+ // aws_s3_metadata namespace in the destination table bucket.
+ //
+ // This member is required.
+ TableArn *string
+
+ // The Amazon Resource Name (ARN) for the table bucket that's specified as the
+ // destination in the metadata table configuration. The destination table bucket
+ // must be in the same Region and Amazon Web Services account as the general
+ // purpose bucket.
+ //
+ // This member is required.
+ TableBucketArn *string
+
+ // The name for the metadata table in your metadata table configuration. The
+ // specified metadata table name must be unique within the aws_s3_metadata
+ // namespace in the destination table bucket.
+ //
+ // This member is required.
+ TableName *string
+
+ // The table bucket namespace for the metadata table in your metadata table
+ // configuration. This value is always aws_s3_metadata .
+ //
+ // This member is required.
+ TableNamespace *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the byte range of the object to get the records from. A record is
+// processed when its first byte is contained by the range. This parameter is
+// optional, but when specified, it must not be empty. See RFC 2616, Section
+// 14.35.1 for how to specify the start and end of the range.
+type ScanRange struct {
+
+ // Specifies the end of the byte range. This parameter is optional. Valid values:
+ // non-negative integers. The default value is one less than the size of the object
+ // being queried. If only the End parameter is supplied, it is interpreted to mean
+ // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes.
+ End *int64
+
+ // Specifies the start of the byte range. This parameter is optional. Valid
+ // values: non-negative integers. The default value is 0. If only start is
+ // supplied, it means scan from that point to the end of the file. For example, 50
+ // means scan from byte 50 until the end of the file.
+ Start *int64
+
+ noSmithyDocumentSerde
+}
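+
+// Illustrative sketch, not part of the generated code: per the field semantics
+// above, a range with only End set scans a suffix of the object.
+//
+//	head := types.ScanRange{Start: aws.Int64(0), End: aws.Int64(1023)} // first 1 KiB
+//	tail := types.ScanRange{End: aws.Int64(50)}                        // last 50 bytes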
+
+// The container for selecting objects from a content event stream.
+//
+// The following types satisfy this interface:
+//
+// SelectObjectContentEventStreamMemberCont
+// SelectObjectContentEventStreamMemberEnd
+// SelectObjectContentEventStreamMemberProgress
+// SelectObjectContentEventStreamMemberRecords
+// SelectObjectContentEventStreamMemberStats
+type SelectObjectContentEventStream interface {
+ isSelectObjectContentEventStream()
+}
+
+// The Continuation Event.
+type SelectObjectContentEventStreamMemberCont struct {
+ Value ContinuationEvent
+
+ noSmithyDocumentSerde
+}
+
+func (*SelectObjectContentEventStreamMemberCont) isSelectObjectContentEventStream() {}
+
+// The End Event.
+type SelectObjectContentEventStreamMemberEnd struct {
+ Value EndEvent
+
+ noSmithyDocumentSerde
+}
+
+func (*SelectObjectContentEventStreamMemberEnd) isSelectObjectContentEventStream() {}
+
+// The Progress Event.
+type SelectObjectContentEventStreamMemberProgress struct {
+ Value ProgressEvent
+
+ noSmithyDocumentSerde
+}
+
+func (*SelectObjectContentEventStreamMemberProgress) isSelectObjectContentEventStream() {}
+
+// The Records Event.
+type SelectObjectContentEventStreamMemberRecords struct {
+ Value RecordsEvent
+
+ noSmithyDocumentSerde
+}
+
+func (*SelectObjectContentEventStreamMemberRecords) isSelectObjectContentEventStream() {}
+
+// The Stats Event.
+type SelectObjectContentEventStreamMemberStats struct {
+ Value StatsEvent
+
+ noSmithyDocumentSerde
+}
+
+func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {}
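+
+// Illustrative sketch, not part of the generated code: client code typically
+// consumes the stream with a type switch over the union members declared
+// above.
+//
+//	switch ev := event.(type) {
+//	case *types.SelectObjectContentEventStreamMemberRecords:
+//		_ = ev.Value.Payload // raw record bytes; one record may span frames
+//	case *types.SelectObjectContentEventStreamMemberStats:
+//		_ = ev.Value.Details // total bytes scanned, processed, returned
+//	case *types.SelectObjectContentEventStreamMemberEnd:
+//		// stream finished
+//	}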
+
+// Amazon S3 Select is no longer available to new customers. Existing customers of
+// Amazon S3 Select can continue to use the feature as usual. [Learn more]
+//
+// Describes the parameters for Select job types.
+//
+// Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering.
+//
+// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/
+// [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/
+// [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html
+// [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html
+type SelectParameters struct {
+
+ // Amazon S3 Select is no longer available to new customers. Existing customers of
+ // Amazon S3 Select can continue to use the feature as usual. [Learn more]
+ //
+ // The expression that is used to query the object.
+ //
+ // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/
+ //
+ // This member is required.
+ Expression *string
+
+ // The type of the provided expression (for example, SQL).
+ //
+ // This member is required.
+ ExpressionType ExpressionType
+
+ // Describes the serialization format of the object.
+ //
+ // This member is required.
+ InputSerialization *InputSerialization
+
+ // Describes how the results of the Select job are serialized.
+ //
+ // This member is required.
+ OutputSerialization *OutputSerialization
+
+ noSmithyDocumentSerde
+}
+
+// Describes the default server-side encryption to apply to new objects in the
+// bucket. If a PUT Object request doesn't specify any server-side encryption, this
+// default encryption will be applied. For more information, see [PutBucketEncryption].
+//
+// - General purpose buckets - If you don't specify a customer managed key at
+// configuration, Amazon S3 automatically creates an Amazon Web Services KMS key (
+// aws/s3 ) in your Amazon Web Services account the first time that you add an
+// object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS
+// key for SSE-KMS.
+//
+//   - Directory buckets - Your SSE-KMS configuration can only support one [customer managed key] per
+//   directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 ) isn't supported.
+//
+// - Directory buckets - For directory buckets, there are only two supported
+// options for server-side encryption: SSE-S3 and SSE-KMS.
+//
+// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html
+// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+type ServerSideEncryptionByDefault struct {
+
+ // Server-side encryption algorithm to use for the default encryption.
+ //
+ // For directory buckets, there are only two supported values for server-side
+ // encryption: AES256 and aws:kms .
+ //
+ // This member is required.
+ SSEAlgorithm ServerSideEncryption
+
+ // Amazon Web Services Key Management Service (KMS) customer managed key ID to use
+ // for the default encryption.
+ //
+ // - General purpose buckets - This parameter is allowed if and only if
+ // SSEAlgorithm is set to aws:kms or aws:kms:dsse .
+ //
+ // - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is
+ // set to aws:kms .
+ //
+ // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the
+ // KMS key.
+ //
+ // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // - Key ARN:
+ // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // - Key Alias: alias/alias-name
+ //
+ // If you are using encryption with cross-account or Amazon Web Services service
+ // operations, you must use a fully qualified KMS key ARN. For more information,
+ // see [Using encryption for cross-account operations].
+ //
+ // - General purpose buckets - If you're specifying a customer managed KMS key,
+ // we recommend using a fully qualified KMS key ARN. If you use a KMS key alias
+ // instead, then KMS resolves the key within the requester’s account. This behavior
+ // can result in data that's encrypted with a KMS key that belongs to the
+ // requester, and not the bucket owner. Also, if you use a key ID, you can run into
+ // a LogDestination undeliverable error when creating a VPC flow log.
+ //
+ //   - Directory buckets - When you specify a [KMS customer managed key] for encryption in your directory
+ // bucket, only use the key ID or key ARN. The key alias format of the KMS key
+ // isn't supported.
+ //
+ // Amazon S3 only supports symmetric encryption KMS keys. For more information,
+ // see [Asymmetric keys in Amazon Web Services KMS] in the Amazon Web Services Key Management Service Developer Guide.
+ //
+ // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy
+ // [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html
+ KMSMasterKeyID *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the default server-side-encryption configuration.
+type ServerSideEncryptionConfiguration struct {
+
+ // Container for information about a particular server-side encryption
+ // configuration rule.
+ //
+ // This member is required.
+ Rules []ServerSideEncryptionRule
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the default server-side encryption configuration.
+//
+// - General purpose buckets - If you're specifying a customer managed KMS key,
+// we recommend using a fully qualified KMS key ARN. If you use a KMS key alias
+// instead, then KMS resolves the key within the requester’s account. This behavior
+// can result in data that's encrypted with a KMS key that belongs to the
+// requester, and not the bucket owner.
+//
+//   - Directory buckets - When you specify a [KMS customer managed key] for encryption in your directory
+// bucket, only use the key ID or key ARN. The key alias format of the KMS key
+// isn't supported.
+//
+// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+type ServerSideEncryptionRule struct {
+
+ // Specifies the default server-side encryption to apply to new objects in the
+ // bucket. If a PUT Object request doesn't specify any server-side encryption, this
+ // default encryption will be applied.
+ ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault
+
+ // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side
+ // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects
+ // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3
+ // to use an S3 Bucket Key.
+ //
+ // - General purpose buckets - By default, S3 Bucket Key is not enabled. For
+ //   more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide.
+ //
+ // - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT
+ // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't
+ //   supported when you copy SSE-KMS-encrypted objects from general purpose buckets
+ // to directory buckets, from directory buckets to general purpose buckets, or
+ // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a
+ // call to KMS every time a copy request is made for a KMS-encrypted object.
+ //
+ // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
+ BucketKeyEnabled *bool
+
+ noSmithyDocumentSerde
+}
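+
+// Illustrative sketch, not part of the generated code: a default-encryption
+// rule that applies SSE-KMS with a fully qualified customer managed key ARN
+// (a placeholder here) and enables the S3 Bucket Key.
+//
+//	rule := types.ServerSideEncryptionRule{
+//		ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
+//			SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
+//			KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		},
+//		BucketKeyEnabled: aws.Bool(true),
+//	}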
+
+// The established temporary security credentials of the session.
+//
+// Directory buckets - These session credentials are only supported for the
+// authentication and authorization of Zonal endpoint API operations on directory
+// buckets.
+type SessionCredentials struct {
+
+ // A unique identifier that's associated with a secret access key. The access key
+ // ID and the secret access key are used together to sign programmatic Amazon Web
+ // Services requests cryptographically.
+ //
+ // This member is required.
+ AccessKeyId *string
+
+ // Temporary security credentials expire after a specified interval. After
+ // temporary credentials expire, any calls that you make with those credentials
+ // will fail, so you must generate a new set of temporary credentials. Temporary
+ // credentials cannot be extended or refreshed beyond the original specified
+ // interval.
+ //
+ // This member is required.
+ Expiration *time.Time
+
+ // A key that's used with the access key ID to cryptographically sign programmatic
+ // Amazon Web Services requests. Signing a request identifies the sender and
+ // prevents the request from being altered.
+ //
+ // This member is required.
+ SecretAccessKey *string
+
+ // A part of the temporary security credentials. The session token is used to
+ // validate the temporary security credentials.
+ //
+ // This member is required.
+ SessionToken *string
+
+ noSmithyDocumentSerde
+}
+
+// To use the simple format for S3 keys for log objects, set SimplePrefix to an
+// empty object.
+//
+// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+type SimplePrefix struct {
+ noSmithyDocumentSerde
+}
+
+// A container that describes additional filters for identifying the source
+// objects that you want to replicate. You can choose to enable or disable the
+// replication of these objects. Currently, Amazon S3 supports only the filter that
+// you can specify for objects created with server-side encryption using a customer
+// managed key stored in Amazon Web Services Key Management Service (SSE-KMS).
+type SourceSelectionCriteria struct {
+
+ // A filter that you can specify for selecting modifications on replicas.
+ // Amazon S3 doesn't replicate replica modifications by default. In the latest
+ // version of replication configuration (when Filter is specified), you can
+ // specify this element and set the status to Enabled to replicate modifications
+ // on replicas.
+ //
+ // If you don't specify the Filter element, Amazon S3 assumes that the replication
+ // configuration is the earlier version, V1. In the earlier version, this element
+ // is not allowed.
+ ReplicaModifications *ReplicaModifications
+
+ // A container for filter information for the selection of Amazon S3 objects
+ // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria
+ // in the replication configuration, this element is required.
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the use of SSE-KMS to encrypt delivered inventory reports.
+type SSEKMS struct {
+
+ // Specifies the ID of the Key Management Service (KMS) symmetric encryption
+ // customer managed key to use for encrypting inventory reports.
+ //
+ // This member is required.
+ KeyId *string
+
+ noSmithyDocumentSerde
+}
+
+// A container for filter information for the selection of S3 objects encrypted
+// with Amazon Web Services KMS.
+type SseKmsEncryptedObjects struct {
+
+ // Specifies whether Amazon S3 replicates objects created with server-side
+ // encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ // Key Management Service.
+ //
+ // This member is required.
+ Status SseKmsEncryptedObjectsStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+type SSES3 struct {
+ noSmithyDocumentSerde
+}
+
+// Container for the stats details.
+type Stats struct {
+
+ // The total number of uncompressed object bytes processed.
+ BytesProcessed *int64
+
+ // The total number of bytes of records payload data returned.
+ BytesReturned *int64
+
+ // The total number of object bytes scanned.
+ BytesScanned *int64
+
+ noSmithyDocumentSerde
+}
+
+// Container for the Stats Event.
+type StatsEvent struct {
+
+ // The Stats event details.
+ Details *Stats
+
+ noSmithyDocumentSerde
+}
+
+// Specifies data related to access patterns to be collected and made available to
+// analyze the tradeoffs between different storage classes for an Amazon S3 bucket.
+type StorageClassAnalysis struct {
+
+ // Specifies how data related to the storage class analysis for an Amazon S3
+ // bucket should be exported.
+ DataExport *StorageClassAnalysisDataExport
+
+ noSmithyDocumentSerde
+}
+
+// Container for data related to the storage class analysis for an Amazon S3
+// bucket for export.
+type StorageClassAnalysisDataExport struct {
+
+ // The place to store the data for an analysis.
+ //
+ // This member is required.
+ Destination *AnalyticsExportDestination
+
+ // The version of the output schema to use when exporting data. Must be V_1 .
+ //
+ // This member is required.
+ OutputSchemaVersion StorageClassAnalysisSchemaVersion
+
+ noSmithyDocumentSerde
+}
+
+// A container of a key value name pair.
+type Tag struct {
+
+ // Name of the object key.
+ //
+ // This member is required.
+ Key *string
+
+ // Value of the tag.
+ //
+ // This member is required.
+ Value *string
+
+ noSmithyDocumentSerde
+}
+
+// Container for TagSet elements.
+type Tagging struct {
+
+ // A collection for a set of tags.
+ //
+ // This member is required.
+ TagSet []Tag
+
+ noSmithyDocumentSerde
+}
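+
+// Illustrative sketch, not part of the generated code: both Key and Value are
+// required on every Tag in the set.
+//
+//	tagging := types.Tagging{
+//		TagSet: []types.Tag{
+//			{Key: aws.String("project"), Value: aws.String("stash")},
+//			{Key: aws.String("env"), Value: aws.String("prod")},
+//		},
+//	}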
+
+// Container for granting information.
+//
+// Buckets that use the bucket owner enforced setting for Object Ownership don't
+// support target grants. For more information, see [Permissions server access log delivery] in the Amazon S3 User Guide.
+//
+// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general
+type TargetGrant struct {
+
+ // Container for the person being granted permissions.
+ Grantee *Grantee
+
+ // Logging permissions assigned to the grantee for the bucket.
+ Permission BucketLogsPermission
+
+ noSmithyDocumentSerde
+}
+
+// Amazon S3 key format for log objects. Only one format, PartitionedPrefix or
+// SimplePrefix, is allowed.
+type TargetObjectKeyFormat struct {
+
+ // Partitioned S3 key for log objects.
+ PartitionedPrefix *PartitionedPrefix
+
+ // Use the simple format for S3 keys for log objects. To specify the SimplePrefix
+ // format, set SimplePrefix to {}.
+ SimplePrefix *SimplePrefix
+
+ noSmithyDocumentSerde
+}
+
+// The S3 Intelligent-Tiering storage class is designed to optimize storage costs
+// by automatically moving data to the most cost-effective storage access tier,
+// without additional operational overhead.
+type Tiering struct {
+
+ // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3
+ // Intelligent-Tiering storage class.
+ //
+ // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access
+ //
+ // This member is required.
+ AccessTier IntelligentTieringAccessTier
+
+ // The number of consecutive days of no access after which an object will be
+ // eligible to be transitioned to the corresponding tier. The minimum number of
+ // days for the Archive Access tier is 90, and for the Deep Archive Access tier
+ // it is 180. The maximum is 2 years (730 days).
+ //
+ // This member is required.
+ Days *int32
+
+ noSmithyDocumentSerde
+}
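+
+// Illustrative sketch, not part of the generated code: a tiering entry that
+// moves objects to the Archive Access tier after 90 days without access, the
+// minimum allowed for that tier. The ARCHIVE_ACCESS value is assumed from this
+// package's IntelligentTieringAccessTier enum.
+//
+//	tiering := types.Tiering{
+//		AccessTier: types.IntelligentTieringAccessTier("ARCHIVE_ACCESS"),
+//		Days:       aws.Int32(90),
+//	}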
+
+// A container for specifying the configuration for publication of messages to an
+// Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 detects
+// specified events.
+type TopicConfiguration struct {
+
+ // The Amazon S3 bucket event about which to send notifications. For more
+ // information, see [Supported Event Types] in the Amazon S3 User Guide.
+ //
+ // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
+ //
+ // This member is required.
+ Events []Event
+
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3
+ // publishes a message when it detects events of the specified type.
+ //
+ // This member is required.
+ TopicArn *string
+
+ // Specifies object key name filtering rules. For information about key name
+ // filtering, see [Configuring event notifications using object key name filtering] in the Amazon S3 User Guide.
+ //
+ // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html
+ Filter *NotificationConfigurationFilter
+
+ // An optional unique identifier for configurations in a notification
+ // configuration. If you don't provide one, Amazon S3 will assign an ID.
+ Id *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies when an object transitions to a specified storage class. For more
+// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle] in the Amazon S3
+// User Guide.
+//
+// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html
+type Transition struct {
+
+ // Indicates when objects are transitioned to the specified storage class. The
+ // date value must be in ISO 8601 format. The time is always midnight UTC.
+ Date *time.Time
+
+ // Indicates the number of days after creation when objects are transitioned to
+ // the specified storage class. If the specified storage class is
+ // INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE , valid values are
+ // 0 or positive integers. If the specified storage class is STANDARD_IA or
+ // ONEZONE_IA , valid values are positive integers greater than 30 . Be aware that
+ // some storage classes have a minimum storage duration and that you're charged for
+ // transitioning objects before their minimum storage duration. For more
+ // information, see [Constraints and considerations for transitions] in the Amazon S3 User Guide.
+ //
+ // [Constraints and considerations for transitions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html#lifecycle-configuration-constraints
+ Days *int32
+
+ // The storage class to which you want the object to transition.
+ StorageClass TransitionStorageClass
+
+ noSmithyDocumentSerde
+}
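+
+// Illustrative sketch, not part of the generated code: a lifecycle transition
+// that moves objects to Glacier 30 days after creation. The GLACIER value is
+// assumed from this package's TransitionStorageClass enum.
+//
+//	t := types.Transition{
+//		Days:         aws.Int32(30),
+//		StorageClass: types.TransitionStorageClass("GLACIER"),
+//	}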
+
+// Describes the versioning state of an Amazon S3 bucket. For more information,
+// see [PUT Bucket versioning] in the Amazon S3 API Reference.
+//
+// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html
+type VersioningConfiguration struct {
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA delete.
+ // If the bucket has never been so configured, this element is not returned.
+ MFADelete MFADelete
+
+ // The versioning state of the bucket.
+ Status BucketVersioningStatus
+
+ noSmithyDocumentSerde
+}
+
+// Specifies website configuration parameters for an Amazon S3 bucket.
+type WebsiteConfiguration struct {
+
+ // The name of the error document for the website.
+ ErrorDocument *ErrorDocument
+
+ // The name of the index document for the website.
+ IndexDocument *IndexDocument
+
+ // The redirect behavior for every request to this bucket's website endpoint.
+ //
+ // If you specify this property, you can't specify any other property.
+ RedirectAllRequestsTo *RedirectAllRequestsTo
+
+ // Rules that define when a redirect is applied and the redirect behavior.
+ RoutingRules []RoutingRule
+
+ noSmithyDocumentSerde
+}
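+
+// Illustrative sketch, not part of the generated code: a static-website
+// configuration with an index and an error document. IndexDocument takes a key
+// suffix, while ErrorDocument takes a full object key; RedirectAllRequestsTo
+// is omitted because it can't be combined with the other properties.
+//
+//	site := types.WebsiteConfiguration{
+//		IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
+//		ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
+//	}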
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+// UnknownUnionMember is returned when a union member is returned over the wire,
+// but has an unknown tag.
+type UnknownUnionMember struct {
+ Tag string
+ Value []byte
+
+ noSmithyDocumentSerde
+}
+
+func (*UnknownUnionMember) isAnalyticsFilter() {}
+func (*UnknownUnionMember) isMetricsFilter() {}
+func (*UnknownUnionMember) isSelectObjectContentEventStream() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go
new file mode 100644
index 000000000..0e664c59c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go
@@ -0,0 +1,23 @@
+package s3
+
+// This file contains helper methods to set the resolver URI into the context
+// object. If they are ever used for something other than S3, they should be
+// moved to internal/context/context.go.
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type s3resolvedURI struct{}
+
+// setS3ResolvedURI sets the URI as resolved by the EndpointResolverV2
+func setS3ResolvedURI(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, s3resolvedURI{}, value)
+}
+
+// getS3ResolvedURI gets the URI as resolved by EndpointResolverV2
+func getS3ResolvedURI(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, s3resolvedURI{}).(string)
+ return v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go
new file mode 100644
index 000000000..97a56bd31
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go
@@ -0,0 +1,5702 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package s3
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/s3/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpAbortMultipartUpload struct {
+}
+
+func (*validateOpAbortMultipartUpload) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAbortMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AbortMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAbortMultipartUploadInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCompleteMultipartUpload struct {
+}
+
+func (*validateOpCompleteMultipartUpload) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCompleteMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CompleteMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCompleteMultipartUploadInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCopyObject struct {
+}
+
+func (*validateOpCopyObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCopyObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CopyObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCopyObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateBucket struct {
+}
+
+func (*validateOpCreateBucket) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateBucketInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateBucketInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateBucketMetadataTableConfiguration struct {
+}
+
+func (*validateOpCreateBucketMetadataTableConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateBucketMetadataTableConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateMultipartUpload struct {
+}
+
+func (*validateOpCreateMultipartUpload) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateMultipartUploadInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateMultipartUploadInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateSession struct {
+}
+
+func (*validateOpCreateSession) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateSessionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateSessionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketAnalyticsConfiguration struct {
+}
+
+func (*validateOpDeleteBucketAnalyticsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketAnalyticsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketAnalyticsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketCors struct {
+}
+
+func (*validateOpDeleteBucketCors) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketCorsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketCorsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketEncryption struct {
+}
+
+func (*validateOpDeleteBucketEncryption) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketEncryptionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketEncryptionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucket struct {
+}
+
+func (*validateOpDeleteBucket) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketIntelligentTieringConfiguration struct {
+}
+
+func (*validateOpDeleteBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketIntelligentTieringConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketIntelligentTieringConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketInventoryConfiguration struct {
+}
+
+func (*validateOpDeleteBucketInventoryConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketInventoryConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketInventoryConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketLifecycle struct {
+}
+
+func (*validateOpDeleteBucketLifecycle) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketLifecycleInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketLifecycleInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketMetadataTableConfiguration struct {
+}
+
+func (*validateOpDeleteBucketMetadataTableConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketMetadataTableConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketMetricsConfiguration struct {
+}
+
+func (*validateOpDeleteBucketMetricsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketMetricsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketMetricsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketOwnershipControls struct {
+}
+
+func (*validateOpDeleteBucketOwnershipControls) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketOwnershipControlsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketOwnershipControlsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketPolicy struct {
+}
+
+func (*validateOpDeleteBucketPolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketPolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketPolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketReplication struct {
+}
+
+func (*validateOpDeleteBucketReplication) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketReplicationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketReplicationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketTagging struct {
+}
+
+func (*validateOpDeleteBucketTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBucketWebsite struct {
+}
+
+func (*validateOpDeleteBucketWebsite) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBucketWebsiteInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteObject struct {
+}
+
+func (*validateOpDeleteObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteObjects struct {
+}
+
+func (*validateOpDeleteObjects) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteObjectsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteObjectsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteObjectTagging struct {
+}
+
+func (*validateOpDeleteObjectTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteObjectTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteObjectTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeletePublicAccessBlock struct {
+}
+
+func (*validateOpDeletePublicAccessBlock) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeletePublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeletePublicAccessBlockInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeletePublicAccessBlockInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketAccelerateConfiguration struct {
+}
+
+func (*validateOpGetBucketAccelerateConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketAccelerateConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketAccelerateConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketAcl struct {
+}
+
+func (*validateOpGetBucketAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketAnalyticsConfiguration struct {
+}
+
+func (*validateOpGetBucketAnalyticsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketAnalyticsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketAnalyticsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketCors struct {
+}
+
+func (*validateOpGetBucketCors) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketCorsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketCorsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketEncryption struct {
+}
+
+func (*validateOpGetBucketEncryption) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketEncryptionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketEncryptionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketIntelligentTieringConfiguration struct {
+}
+
+func (*validateOpGetBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketIntelligentTieringConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketIntelligentTieringConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketInventoryConfiguration struct {
+}
+
+func (*validateOpGetBucketInventoryConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketInventoryConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketInventoryConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketLifecycleConfiguration struct {
+}
+
+func (*validateOpGetBucketLifecycleConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketLifecycleConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketLifecycleConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketLocation struct {
+}
+
+func (*validateOpGetBucketLocation) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketLocation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketLocationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketLocationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketLogging struct {
+}
+
+func (*validateOpGetBucketLogging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketLoggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketLoggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketMetadataTableConfiguration struct {
+}
+
+func (*validateOpGetBucketMetadataTableConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketMetadataTableConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketMetricsConfiguration struct {
+}
+
+func (*validateOpGetBucketMetricsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketMetricsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketMetricsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketNotificationConfiguration struct {
+}
+
+func (*validateOpGetBucketNotificationConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketNotificationConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketNotificationConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketOwnershipControls struct {
+}
+
+func (*validateOpGetBucketOwnershipControls) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketOwnershipControlsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketOwnershipControlsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketPolicy struct {
+}
+
+func (*validateOpGetBucketPolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketPolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketPolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketPolicyStatus struct {
+}
+
+func (*validateOpGetBucketPolicyStatus) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketPolicyStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketPolicyStatusInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketPolicyStatusInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketReplication struct {
+}
+
+func (*validateOpGetBucketReplication) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketReplicationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketReplicationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketRequestPayment struct {
+}
+
+func (*validateOpGetBucketRequestPayment) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketRequestPaymentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketRequestPaymentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketTagging struct {
+}
+
+func (*validateOpGetBucketTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketVersioning struct {
+}
+
+func (*validateOpGetBucketVersioning) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketVersioningInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketVersioningInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetBucketWebsite struct {
+}
+
+func (*validateOpGetBucketWebsite) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetBucketWebsiteInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectAcl struct {
+}
+
+func (*validateOpGetObjectAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectAttributes struct {
+}
+
+func (*validateOpGetObjectAttributes) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectAttributes) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectAttributesInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectAttributesInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObject struct {
+}
+
+func (*validateOpGetObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectLegalHold struct {
+}
+
+func (*validateOpGetObjectLegalHold) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectLegalHoldInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectLegalHoldInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectLockConfiguration struct {
+}
+
+func (*validateOpGetObjectLockConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectLockConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectLockConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectRetention struct {
+}
+
+func (*validateOpGetObjectRetention) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectRetentionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectRetentionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectTagging struct {
+}
+
+func (*validateOpGetObjectTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetObjectTorrent struct {
+}
+
+func (*validateOpGetObjectTorrent) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetObjectTorrent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetObjectTorrentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetObjectTorrentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetPublicAccessBlock struct {
+}
+
+func (*validateOpGetPublicAccessBlock) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetPublicAccessBlockInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetPublicAccessBlockInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpHeadBucket struct {
+}
+
+func (*validateOpHeadBucket) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpHeadBucket) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*HeadBucketInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpHeadBucketInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpHeadObject struct {
+}
+
+func (*validateOpHeadObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpHeadObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*HeadObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpHeadObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketAnalyticsConfigurations struct {
+}
+
+func (*validateOpListBucketAnalyticsConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketAnalyticsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketAnalyticsConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketAnalyticsConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketIntelligentTieringConfigurations struct {
+}
+
+func (*validateOpListBucketIntelligentTieringConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketIntelligentTieringConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketIntelligentTieringConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketIntelligentTieringConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketInventoryConfigurations struct {
+}
+
+func (*validateOpListBucketInventoryConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketInventoryConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketInventoryConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketInventoryConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListBucketMetricsConfigurations struct {
+}
+
+func (*validateOpListBucketMetricsConfigurations) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListBucketMetricsConfigurations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListBucketMetricsConfigurationsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListBucketMetricsConfigurationsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListMultipartUploads struct {
+}
+
+func (*validateOpListMultipartUploads) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListMultipartUploads) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListMultipartUploadsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListMultipartUploadsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListObjects struct {
+}
+
+func (*validateOpListObjects) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListObjects) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListObjectsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListObjectsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListObjectsV2 struct {
+}
+
+func (*validateOpListObjectsV2) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListObjectsV2) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListObjectsV2Input)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListObjectsV2Input(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListObjectVersions struct {
+}
+
+func (*validateOpListObjectVersions) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListObjectVersions) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListObjectVersionsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListObjectVersionsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListParts struct {
+}
+
+func (*validateOpListParts) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListParts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListPartsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListPartsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketAccelerateConfiguration struct {
+}
+
+func (*validateOpPutBucketAccelerateConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketAccelerateConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketAccelerateConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketAccelerateConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketAcl struct {
+}
+
+func (*validateOpPutBucketAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketAnalyticsConfiguration struct {
+}
+
+func (*validateOpPutBucketAnalyticsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketAnalyticsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketAnalyticsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketAnalyticsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketCors struct {
+}
+
+func (*validateOpPutBucketCors) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketCors) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketCorsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketCorsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketEncryption struct {
+}
+
+func (*validateOpPutBucketEncryption) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketEncryption) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketEncryptionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketEncryptionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketIntelligentTieringConfiguration struct {
+}
+
+func (*validateOpPutBucketIntelligentTieringConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketIntelligentTieringConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketIntelligentTieringConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketIntelligentTieringConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketInventoryConfiguration struct {
+}
+
+func (*validateOpPutBucketInventoryConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketInventoryConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketInventoryConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketInventoryConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketLifecycleConfiguration struct {
+}
+
+func (*validateOpPutBucketLifecycleConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketLifecycleConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketLifecycleConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketLifecycleConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketLogging struct {
+}
+
+func (*validateOpPutBucketLogging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketLogging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketLoggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketLoggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketMetricsConfiguration struct {
+}
+
+func (*validateOpPutBucketMetricsConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketMetricsConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketMetricsConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketMetricsConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketNotificationConfiguration struct {
+}
+
+func (*validateOpPutBucketNotificationConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketNotificationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketNotificationConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketNotificationConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketOwnershipControls struct {
+}
+
+func (*validateOpPutBucketOwnershipControls) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketOwnershipControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketOwnershipControlsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketOwnershipControlsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketPolicy struct {
+}
+
+func (*validateOpPutBucketPolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketPolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketPolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketReplication struct {
+}
+
+func (*validateOpPutBucketReplication) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketReplication) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketReplicationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketReplicationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketRequestPayment struct {
+}
+
+func (*validateOpPutBucketRequestPayment) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketRequestPayment) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketRequestPaymentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketRequestPaymentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketTagging struct {
+}
+
+func (*validateOpPutBucketTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketVersioning struct {
+}
+
+func (*validateOpPutBucketVersioning) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketVersioning) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketVersioningInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketVersioningInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutBucketWebsite struct {
+}
+
+func (*validateOpPutBucketWebsite) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutBucketWebsite) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutBucketWebsiteInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutBucketWebsiteInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectAcl struct {
+}
+
+func (*validateOpPutObjectAcl) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectAclInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectAclInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObject struct {
+}
+
+func (*validateOpPutObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectLegalHold struct {
+}
+
+func (*validateOpPutObjectLegalHold) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectLegalHold) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectLegalHoldInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectLegalHoldInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectLockConfiguration struct {
+}
+
+func (*validateOpPutObjectLockConfiguration) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectLockConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectLockConfigurationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectLockConfigurationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectRetention struct {
+}
+
+func (*validateOpPutObjectRetention) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectRetention) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectRetentionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectRetentionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutObjectTagging struct {
+}
+
+func (*validateOpPutObjectTagging) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutObjectTagging) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutObjectTaggingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutObjectTaggingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutPublicAccessBlock struct {
+}
+
+func (*validateOpPutPublicAccessBlock) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutPublicAccessBlock) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutPublicAccessBlockInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutPublicAccessBlockInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpRestoreObject struct {
+}
+
+func (*validateOpRestoreObject) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpRestoreObject) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*RestoreObjectInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpRestoreObjectInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpSelectObjectContent struct {
+}
+
+func (*validateOpSelectObjectContent) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpSelectObjectContent) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*SelectObjectContentInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpSelectObjectContentInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpUploadPartCopy struct {
+}
+
+func (*validateOpUploadPartCopy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpUploadPartCopy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*UploadPartCopyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpUploadPartCopyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpUploadPart struct {
+}
+
+func (*validateOpUploadPart) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpUploadPart) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*UploadPartInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpUploadPartInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpWriteGetObjectResponse struct {
+}
+
+func (*validateOpWriteGetObjectResponse) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpWriteGetObjectResponse) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*WriteGetObjectResponseInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpWriteGetObjectResponseInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
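+// The addOp*ValidationMiddleware helpers below attach one validator each to
+// the Initialize phase of an operation's middleware stack, so required-field
+// errors surface before the request is ever serialized or signed. A minimal
+// sketch of the wiring (assuming the usual smithy-go stack setup used by the
+// generated api_op_*.go files):
+//
+//	stack := middleware.NewStack("PutObject", smithyhttp.NewStackRequest)
+//	if err := addOpPutObjectValidationMiddleware(stack); err != nil {
+//		// handle registration error
+//	}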
+func addOpAbortMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAbortMultipartUpload{}, middleware.After)
+}
+
+func addOpCompleteMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCompleteMultipartUpload{}, middleware.After)
+}
+
+func addOpCopyObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCopyObject{}, middleware.After)
+}
+
+func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After)
+}
+
+func addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateBucketMetadataTableConfiguration{}, middleware.After)
+}
+
+func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After)
+}
+
+func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After)
+}
+
+func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketCors{}, middleware.After)
+}
+
+func addOpDeleteBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketEncryption{}, middleware.After)
+}
+
+func addOpDeleteBucketValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucket{}, middleware.After)
+}
+
+func addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After)
+}
+
+func addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketMetadataTableConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpDeleteBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpDeleteBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketPolicy{}, middleware.After)
+}
+
+func addOpDeleteBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketReplication{}, middleware.After)
+}
+
+func addOpDeleteBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketTagging{}, middleware.After)
+}
+
+func addOpDeleteBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteBucketWebsite{}, middleware.After)
+}
+
+func addOpDeleteObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteObject{}, middleware.After)
+}
+
+func addOpDeleteObjectsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteObjects{}, middleware.After)
+}
+
+func addOpDeleteObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeleteObjectTagging{}, middleware.After)
+}
+
+func addOpDeletePublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDeletePublicAccessBlock{}, middleware.After)
+}
+
+func addOpGetBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketAccelerateConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketAcl{}, middleware.After)
+}
+
+func addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketCors{}, middleware.After)
+}
+
+func addOpGetBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketEncryption{}, middleware.After)
+}
+
+func addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketLifecycleConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketLocationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketLocation{}, middleware.After)
+}
+
+func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After)
+}
+
+func addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketMetadataTableConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketNotificationConfiguration{}, middleware.After)
+}
+
+func addOpGetBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpGetBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketPolicy{}, middleware.After)
+}
+
+func addOpGetBucketPolicyStatusValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketPolicyStatus{}, middleware.After)
+}
+
+func addOpGetBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketReplication{}, middleware.After)
+}
+
+func addOpGetBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketRequestPayment{}, middleware.After)
+}
+
+func addOpGetBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketTagging{}, middleware.After)
+}
+
+func addOpGetBucketVersioningValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketVersioning{}, middleware.After)
+}
+
+func addOpGetBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetBucketWebsite{}, middleware.After)
+}
+
+func addOpGetObjectAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectAcl{}, middleware.After)
+}
+
+func addOpGetObjectAttributesValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectAttributes{}, middleware.After)
+}
+
+func addOpGetObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObject{}, middleware.After)
+}
+
+func addOpGetObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectLegalHold{}, middleware.After)
+}
+
+func addOpGetObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectLockConfiguration{}, middleware.After)
+}
+
+func addOpGetObjectRetentionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectRetention{}, middleware.After)
+}
+
+func addOpGetObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectTagging{}, middleware.After)
+}
+
+func addOpGetObjectTorrentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetObjectTorrent{}, middleware.After)
+}
+
+func addOpGetPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetPublicAccessBlock{}, middleware.After)
+}
+
+func addOpHeadBucketValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpHeadBucket{}, middleware.After)
+}
+
+func addOpHeadObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpHeadObject{}, middleware.After)
+}
+
+func addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketAnalyticsConfigurations{}, middleware.After)
+}
+
+func addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketIntelligentTieringConfigurations{}, middleware.After)
+}
+
+func addOpListBucketInventoryConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketInventoryConfigurations{}, middleware.After)
+}
+
+func addOpListBucketMetricsConfigurationsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListBucketMetricsConfigurations{}, middleware.After)
+}
+
+func addOpListMultipartUploadsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListMultipartUploads{}, middleware.After)
+}
+
+func addOpListObjectsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListObjects{}, middleware.After)
+}
+
+func addOpListObjectsV2ValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListObjectsV2{}, middleware.After)
+}
+
+func addOpListObjectVersionsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListObjectVersions{}, middleware.After)
+}
+
+func addOpListPartsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListParts{}, middleware.After)
+}
+
+func addOpPutBucketAccelerateConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketAccelerateConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketAcl{}, middleware.After)
+}
+
+func addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketAnalyticsConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketCorsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketCors{}, middleware.After)
+}
+
+func addOpPutBucketEncryptionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketEncryption{}, middleware.After)
+}
+
+func addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketInventoryConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketInventoryConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketLifecycleConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketLifecycleConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketLoggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketLogging{}, middleware.After)
+}
+
+func addOpPutBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketMetricsConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketNotificationConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketNotificationConfiguration{}, middleware.After)
+}
+
+func addOpPutBucketOwnershipControlsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketOwnershipControls{}, middleware.After)
+}
+
+func addOpPutBucketPolicyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketPolicy{}, middleware.After)
+}
+
+func addOpPutBucketReplicationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketReplication{}, middleware.After)
+}
+
+func addOpPutBucketRequestPaymentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketRequestPayment{}, middleware.After)
+}
+
+func addOpPutBucketTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketTagging{}, middleware.After)
+}
+
+func addOpPutBucketVersioningValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketVersioning{}, middleware.After)
+}
+
+func addOpPutBucketWebsiteValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutBucketWebsite{}, middleware.After)
+}
+
+func addOpPutObjectAclValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectAcl{}, middleware.After)
+}
+
+func addOpPutObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObject{}, middleware.After)
+}
+
+func addOpPutObjectLegalHoldValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectLegalHold{}, middleware.After)
+}
+
+func addOpPutObjectLockConfigurationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectLockConfiguration{}, middleware.After)
+}
+
+func addOpPutObjectRetentionValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectRetention{}, middleware.After)
+}
+
+func addOpPutObjectTaggingValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutObjectTagging{}, middleware.After)
+}
+
+func addOpPutPublicAccessBlockValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpPutPublicAccessBlock{}, middleware.After)
+}
+
+func addOpRestoreObjectValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpRestoreObject{}, middleware.After)
+}
+
+func addOpSelectObjectContentValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpSelectObjectContent{}, middleware.After)
+}
+
+func addOpUploadPartCopyValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUploadPartCopy{}, middleware.After)
+}
+
+func addOpUploadPartValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpUploadPart{}, middleware.After)
+}
+
+func addOpWriteGetObjectResponseValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpWriteGetObjectResponse{}, middleware.After)
+}
+
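+// The validate* helpers below check nested shapes (ACL grants, lifecycle
+// rules, replication destinations, and so on). Each returns nil for a nil
+// value, accumulates problems in a smithy.InvalidParamsError, and nests
+// child errors under the offending field name via AddNested.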
+func validateAccessControlPolicy(v *types.AccessControlPolicy) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AccessControlPolicy"}
+ if v.Grants != nil {
+ if err := validateGrants(v.Grants); err != nil {
+ invalidParams.AddNested("Grants", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAccessControlTranslation(v *types.AccessControlTranslation) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AccessControlTranslation"}
+ if len(v.Owner) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Owner"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsAndOperator(v *types.AnalyticsAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsConfiguration(v *types.AnalyticsConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsConfiguration"}
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.Filter != nil {
+ if err := validateAnalyticsFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.StorageClassAnalysis == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StorageClassAnalysis"))
+ } else if v.StorageClassAnalysis != nil {
+ if err := validateStorageClassAnalysis(v.StorageClassAnalysis); err != nil {
+ invalidParams.AddNested("StorageClassAnalysis", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsExportDestination(v *types.AnalyticsExportDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsExportDestination"}
+ if v.S3BucketDestination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination"))
+ } else if v.S3BucketDestination != nil {
+ if err := validateAnalyticsS3BucketDestination(v.S3BucketDestination); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
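+// AnalyticsFilter is a tagged union; the type switch below validates only
+// the member that is actually set. The same pattern appears again for
+// MetricsFilter further down in this file.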
+func validateAnalyticsFilter(v types.AnalyticsFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsFilter"}
+ switch uv := v.(type) {
+ case *types.AnalyticsFilterMemberAnd:
+ if err := validateAnalyticsAndOperator(&uv.Value); err != nil {
+ invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+ }
+
+ case *types.AnalyticsFilterMemberTag:
+ if err := validateTag(&uv.Value); err != nil {
+ invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+ }
+
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateAnalyticsS3BucketDestination(v *types.AnalyticsS3BucketDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AnalyticsS3BucketDestination"}
+ if len(v.Format) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Format"))
+ }
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BucketLifecycleConfiguration"}
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateLifecycleRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateBucketLoggingStatus(v *types.BucketLoggingStatus) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BucketLoggingStatus"}
+ if v.LoggingEnabled != nil {
+ if err := validateLoggingEnabled(v.LoggingEnabled); err != nil {
+ invalidParams.AddNested("LoggingEnabled", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateCORSConfiguration(v *types.CORSConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CORSConfiguration"}
+ if v.CORSRules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CORSRules"))
+ } else if v.CORSRules != nil {
+ if err := validateCORSRules(v.CORSRules); err != nil {
+ invalidParams.AddNested("CORSRules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateCORSRule(v *types.CORSRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CORSRule"}
+ if v.AllowedMethods == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AllowedMethods"))
+ }
+ if v.AllowedOrigins == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AllowedOrigins"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
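+// Slice validators such as this one nest each element's error under its
+// index (e.g. a bad third rule reports under "CORSRules[2]"), keeping the
+// reported parameter path unambiguous for repeated shapes.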
+func validateCORSRules(v []types.CORSRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CORSRules"}
+ for i := range v {
+ if err := validateCORSRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateDelete(v *types.Delete) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Delete"}
+ if v.Objects == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Objects"))
+ } else if v.Objects != nil {
+ if err := validateObjectIdentifierList(v.Objects); err != nil {
+ invalidParams.AddNested("Objects", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateDestination(v *types.Destination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Destination"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.AccessControlTranslation != nil {
+ if err := validateAccessControlTranslation(v.AccessControlTranslation); err != nil {
+ invalidParams.AddNested("AccessControlTranslation", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicationTime != nil {
+ if err := validateReplicationTime(v.ReplicationTime); err != nil {
+ invalidParams.AddNested("ReplicationTime", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Metrics != nil {
+ if err := validateMetrics(v.Metrics); err != nil {
+ invalidParams.AddNested("Metrics", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateEncryption(v *types.Encryption) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Encryption"}
+ if len(v.EncryptionType) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("EncryptionType"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateErrorDocument(v *types.ErrorDocument) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ErrorDocument"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateExistingObjectReplication(v *types.ExistingObjectReplication) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ExistingObjectReplication"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGlacierJobParameters(v *types.GlacierJobParameters) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GlacierJobParameters"}
+ if len(v.Tier) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Tier"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGrant(v *types.Grant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Grant"}
+ if v.Grantee != nil {
+ if err := validateGrantee(v.Grantee); err != nil {
+ invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGrantee(v *types.Grantee) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Grantee"}
+ if len(v.Type) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Type"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateGrants(v []types.Grant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Grants"}
+ for i := range v {
+ if err := validateGrant(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIndexDocument(v *types.IndexDocument) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IndexDocument"}
+ if v.Suffix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Suffix"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIntelligentTieringAndOperator(v *types.IntelligentTieringAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIntelligentTieringConfiguration(v *types.IntelligentTieringConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringConfiguration"}
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.Filter != nil {
+ if err := validateIntelligentTieringFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if v.Tierings == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tierings"))
+ } else if v.Tierings != nil {
+ if err := validateTieringList(v.Tierings); err != nil {
+ invalidParams.AddNested("Tierings", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateIntelligentTieringFilter(v *types.IntelligentTieringFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "IntelligentTieringFilter"}
+ if v.Tag != nil {
+ if err := validateTag(v.Tag); err != nil {
+ invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.And != nil {
+ if err := validateIntelligentTieringAndOperator(v.And); err != nil {
+ invalidParams.AddNested("And", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryConfiguration(v *types.InventoryConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryConfiguration"}
+ if v.Destination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+ } else if v.Destination != nil {
+ if err := validateInventoryDestination(v.Destination); err != nil {
+ invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.IsEnabled == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IsEnabled"))
+ }
+ if v.Filter != nil {
+ if err := validateInventoryFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if len(v.IncludedObjectVersions) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("IncludedObjectVersions"))
+ }
+ if v.Schedule == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Schedule"))
+ } else if v.Schedule != nil {
+ if err := validateInventorySchedule(v.Schedule); err != nil {
+ invalidParams.AddNested("Schedule", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryDestination(v *types.InventoryDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryDestination"}
+ if v.S3BucketDestination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3BucketDestination"))
+ } else if v.S3BucketDestination != nil {
+ if err := validateInventoryS3BucketDestination(v.S3BucketDestination); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryEncryption(v *types.InventoryEncryption) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryEncryption"}
+ if v.SSEKMS != nil {
+ if err := validateSSEKMS(v.SSEKMS); err != nil {
+ invalidParams.AddNested("SSEKMS", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryFilter(v *types.InventoryFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryFilter"}
+ if v.Prefix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Prefix"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventoryS3BucketDestination(v *types.InventoryS3BucketDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventoryS3BucketDestination"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if len(v.Format) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Format"))
+ }
+ if v.Encryption != nil {
+ if err := validateInventoryEncryption(v.Encryption); err != nil {
+ invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateInventorySchedule(v *types.InventorySchedule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "InventorySchedule"}
+ if len(v.Frequency) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Frequency"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLambdaFunctionConfiguration(v *types.LambdaFunctionConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfiguration"}
+ if v.LambdaFunctionArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("LambdaFunctionArn"))
+ }
+ if v.Events == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Events"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLambdaFunctionConfigurationList(v []types.LambdaFunctionConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LambdaFunctionConfigurationList"}
+ for i := range v {
+ if err := validateLambdaFunctionConfiguration(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRule(v *types.LifecycleRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRule"}
+ if v.Filter != nil {
+ if err := validateLifecycleRuleFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRuleFilter(v *types.LifecycleRuleFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"}
+ if v.Tag != nil {
+ if err := validateTag(v.Tag); err != nil {
+ invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.And != nil {
+ if err := validateLifecycleRuleAndOperator(v.And); err != nil {
+ invalidParams.AddNested("And", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLifecycleRules(v []types.LifecycleRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LifecycleRules"}
+ for i := range v {
+ if err := validateLifecycleRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLoggingEnabled(v *types.LoggingEnabled) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LoggingEnabled"}
+ if v.TargetBucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetBucket"))
+ }
+ if v.TargetGrants != nil {
+ if err := validateTargetGrants(v.TargetGrants); err != nil {
+ invalidParams.AddNested("TargetGrants", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.TargetPrefix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetPrefix"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetadataTableConfiguration(v *types.MetadataTableConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetadataTableConfiguration"}
+ if v.S3TablesDestination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3TablesDestination"))
+ } else if v.S3TablesDestination != nil {
+ if err := validateS3TablesDestination(v.S3TablesDestination); err != nil {
+ invalidParams.AddNested("S3TablesDestination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetrics(v *types.Metrics) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Metrics"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetricsAndOperator(v *types.MetricsAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetricsAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetricsConfiguration(v *types.MetricsConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetricsConfiguration"}
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.Filter != nil {
+ if err := validateMetricsFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateMetricsFilter(v types.MetricsFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "MetricsFilter"}
+ switch uv := v.(type) {
+ case *types.MetricsFilterMemberAnd:
+ if err := validateMetricsAndOperator(&uv.Value); err != nil {
+ invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError))
+ }
+
+ case *types.MetricsFilterMemberTag:
+ if err := validateTag(&uv.Value); err != nil {
+ invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError))
+ }
+
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateNotificationConfiguration(v *types.NotificationConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "NotificationConfiguration"}
+ if v.TopicConfigurations != nil {
+ if err := validateTopicConfigurationList(v.TopicConfigurations); err != nil {
+ invalidParams.AddNested("TopicConfigurations", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.QueueConfigurations != nil {
+ if err := validateQueueConfigurationList(v.QueueConfigurations); err != nil {
+ invalidParams.AddNested("QueueConfigurations", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.LambdaFunctionConfigurations != nil {
+ if err := validateLambdaFunctionConfigurationList(v.LambdaFunctionConfigurations); err != nil {
+ invalidParams.AddNested("LambdaFunctionConfigurations", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateObjectIdentifier(v *types.ObjectIdentifier) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifier"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateObjectIdentifierList(v []types.ObjectIdentifier) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ObjectIdentifierList"}
+ for i := range v {
+ if err := validateObjectIdentifier(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOutputLocation(v *types.OutputLocation) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OutputLocation"}
+ if v.S3 != nil {
+ if err := validateS3Location(v.S3); err != nil {
+ invalidParams.AddNested("S3", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOwnershipControls(v *types.OwnershipControls) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OwnershipControls"}
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateOwnershipControlsRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOwnershipControlsRule(v *types.OwnershipControlsRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRule"}
+ if len(v.ObjectOwnership) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("ObjectOwnership"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOwnershipControlsRules(v []types.OwnershipControlsRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "OwnershipControlsRules"}
+ for i := range v {
+ if err := validateOwnershipControlsRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateQueueConfiguration(v *types.QueueConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "QueueConfiguration"}
+ if v.QueueArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("QueueArn"))
+ }
+ if v.Events == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Events"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateQueueConfigurationList(v []types.QueueConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "QueueConfigurationList"}
+ for i := range v {
+ if err := validateQueueConfiguration(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRedirectAllRequestsTo(v *types.RedirectAllRequestsTo) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RedirectAllRequestsTo"}
+ if v.HostName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("HostName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaModifications(v *types.ReplicaModifications) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaModifications"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationConfiguration(v *types.ReplicationConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationConfiguration"}
+ if v.Role == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Role"))
+ }
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateReplicationRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRule(v *types.ReplicationRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRule"}
+ if v.Filter != nil {
+ if err := validateReplicationRuleFilter(v.Filter); err != nil {
+ invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if v.SourceSelectionCriteria != nil {
+ if err := validateSourceSelectionCriteria(v.SourceSelectionCriteria); err != nil {
+ invalidParams.AddNested("SourceSelectionCriteria", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ExistingObjectReplication != nil {
+ if err := validateExistingObjectReplication(v.ExistingObjectReplication); err != nil {
+ invalidParams.AddNested("ExistingObjectReplication", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Destination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+ } else if v.Destination != nil {
+ if err := validateDestination(v.Destination); err != nil {
+ invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleAndOperator"}
+ if v.Tags != nil {
+ if err := validateTagSet(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRuleFilter(v *types.ReplicationRuleFilter) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"}
+ if v.Tag != nil {
+ if err := validateTag(v.Tag); err != nil {
+ invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.And != nil {
+ if err := validateReplicationRuleAndOperator(v.And); err != nil {
+ invalidParams.AddNested("And", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationRules(v []types.ReplicationRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationRules"}
+ for i := range v {
+ if err := validateReplicationRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationTime(v *types.ReplicationTime) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationTime"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if v.Time == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Time"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRequestPaymentConfiguration(v *types.RequestPaymentConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RequestPaymentConfiguration"}
+ if len(v.Payer) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Payer"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRestoreRequest(v *types.RestoreRequest) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RestoreRequest"}
+ if v.GlacierJobParameters != nil {
+ if err := validateGlacierJobParameters(v.GlacierJobParameters); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.SelectParameters != nil {
+ if err := validateSelectParameters(v.SelectParameters); err != nil {
+ invalidParams.AddNested("SelectParameters", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.OutputLocation != nil {
+ if err := validateOutputLocation(v.OutputLocation); err != nil {
+ invalidParams.AddNested("OutputLocation", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRoutingRule(v *types.RoutingRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RoutingRule"}
+ if v.Redirect == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Redirect"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateRoutingRules(v []types.RoutingRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RoutingRules"}
+ for i := range v {
+ if err := validateRoutingRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateS3Location(v *types.S3Location) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "S3Location"}
+ if v.BucketName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BucketName"))
+ }
+ if v.Prefix == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Prefix"))
+ }
+ if v.Encryption != nil {
+ if err := validateEncryption(v.Encryption); err != nil {
+ invalidParams.AddNested("Encryption", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.AccessControlList != nil {
+ if err := validateGrants(v.AccessControlList); err != nil {
+ invalidParams.AddNested("AccessControlList", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Tagging != nil {
+ if err := validateTagging(v.Tagging); err != nil {
+ invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateS3TablesDestination(v *types.S3TablesDestination) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "S3TablesDestination"}
+ if v.TableBucketArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableBucketArn"))
+ }
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSelectParameters(v *types.SelectParameters) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SelectParameters"}
+ if v.InputSerialization == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("InputSerialization"))
+ }
+ if len(v.ExpressionType) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("ExpressionType"))
+ }
+ if v.Expression == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Expression"))
+ }
+ if v.OutputSerialization == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionByDefault(v *types.ServerSideEncryptionByDefault) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionByDefault"}
+ if len(v.SSEAlgorithm) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("SSEAlgorithm"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionConfiguration(v *types.ServerSideEncryptionConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionConfiguration"}
+ if v.Rules == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Rules"))
+ } else if v.Rules != nil {
+ if err := validateServerSideEncryptionRules(v.Rules); err != nil {
+ invalidParams.AddNested("Rules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionRule(v *types.ServerSideEncryptionRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRule"}
+ if v.ApplyServerSideEncryptionByDefault != nil {
+ if err := validateServerSideEncryptionByDefault(v.ApplyServerSideEncryptionByDefault); err != nil {
+ invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateServerSideEncryptionRules(v []types.ServerSideEncryptionRule) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ServerSideEncryptionRules"}
+ for i := range v {
+ if err := validateServerSideEncryptionRule(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSourceSelectionCriteria(v *types.SourceSelectionCriteria) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SourceSelectionCriteria"}
+ if v.SseKmsEncryptedObjects != nil {
+ if err := validateSseKmsEncryptedObjects(v.SseKmsEncryptedObjects); err != nil {
+ invalidParams.AddNested("SseKmsEncryptedObjects", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaModifications != nil {
+ if err := validateReplicaModifications(v.ReplicaModifications); err != nil {
+ invalidParams.AddNested("ReplicaModifications", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSSEKMS(v *types.SSEKMS) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SSEKMS"}
+ if v.KeyId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("KeyId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateSseKmsEncryptedObjects(v *types.SseKmsEncryptedObjects) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SseKmsEncryptedObjects"}
+ if len(v.Status) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("Status"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateStorageClassAnalysis(v *types.StorageClassAnalysis) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysis"}
+ if v.DataExport != nil {
+ if err := validateStorageClassAnalysisDataExport(v.DataExport); err != nil {
+ invalidParams.AddNested("DataExport", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateStorageClassAnalysisDataExport(v *types.StorageClassAnalysisDataExport) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "StorageClassAnalysisDataExport"}
+ if len(v.OutputSchemaVersion) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("OutputSchemaVersion"))
+ }
+ if v.Destination == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Destination"))
+ } else if v.Destination != nil {
+ if err := validateAnalyticsExportDestination(v.Destination); err != nil {
+ invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTag(v *types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tag"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Value == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Value"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagging(v *types.Tagging) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tagging"}
+ if v.TagSet == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TagSet"))
+ } else if v.TagSet != nil {
+ if err := validateTagSet(v.TagSet); err != nil {
+ invalidParams.AddNested("TagSet", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagSet(v []types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TagSet"}
+ for i := range v {
+ if err := validateTag(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTargetGrant(v *types.TargetGrant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TargetGrant"}
+ if v.Grantee != nil {
+ if err := validateGrantee(v.Grantee); err != nil {
+ invalidParams.AddNested("Grantee", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTargetGrants(v []types.TargetGrant) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TargetGrants"}
+ for i := range v {
+ if err := validateTargetGrant(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTiering(v *types.Tiering) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tiering"}
+ if v.Days == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Days"))
+ }
+ if len(v.AccessTier) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessTier"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTieringList(v []types.Tiering) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TieringList"}
+ for i := range v {
+ if err := validateTiering(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTopicConfiguration(v *types.TopicConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TopicConfiguration"}
+ if v.TopicArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TopicArn"))
+ }
+ if v.Events == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Events"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTopicConfigurationList(v []types.TopicConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TopicConfigurationList"}
+ for i := range v {
+ if err := validateTopicConfiguration(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateWebsiteConfiguration(v *types.WebsiteConfiguration) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "WebsiteConfiguration"}
+ if v.ErrorDocument != nil {
+ if err := validateErrorDocument(v.ErrorDocument); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.IndexDocument != nil {
+ if err := validateIndexDocument(v.IndexDocument); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.RedirectAllRequestsTo != nil {
+ if err := validateRedirectAllRequestsTo(v.RedirectAllRequestsTo); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.RoutingRules != nil {
+ if err := validateRoutingRules(v.RoutingRules); err != nil {
+ invalidParams.AddNested("RoutingRules", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAbortMultipartUploadInput(v *AbortMultipartUploadInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AbortMultipartUploadInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCompleteMultipartUploadInput(v *CompleteMultipartUploadInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CompleteMultipartUploadInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCopyObjectInput(v *CopyObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CopyObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.CopySource == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CopySource"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateBucketInput(v *CreateBucketInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateBucketInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataTableConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.MetadataTableConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("MetadataTableConfiguration"))
+ } else if v.MetadataTableConfiguration != nil {
+ if err := validateMetadataTableConfiguration(v.MetadataTableConfiguration); err != nil {
+ invalidParams.AddNested("MetadataTableConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateMultipartUploadInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateSessionInput(v *CreateSessionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateSessionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketAnalyticsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketCorsInput(v *DeleteBucketCorsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketCorsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketEncryptionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketInput(v *DeleteBucketInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketIntelligentTieringConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketInventoryConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketLifecycleInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataTableConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetricsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketOwnershipControlsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketPolicyInput(v *DeleteBucketPolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketPolicyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketReplicationInput(v *DeleteBucketReplicationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketReplicationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketTaggingInput(v *DeleteBucketTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketWebsiteInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteObjectInput(v *DeleteObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteObjectsInput(v *DeleteObjectsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Delete == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Delete"))
+ } else if v.Delete != nil {
+ if err := validateDelete(v.Delete); err != nil {
+ invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteObjectTaggingInput(v *DeleteObjectTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteObjectTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeletePublicAccessBlockInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketAccelerateConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketAclInput(v *GetBucketAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketAclInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketAnalyticsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketCorsInput(v *GetBucketCorsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketCorsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketEncryptionInput(v *GetBucketEncryptionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketEncryptionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketIntelligentTieringConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketInventoryConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketLifecycleConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketLocationInput(v *GetBucketLocationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketLocationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketLoggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataTableConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetricsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketNotificationConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketOwnershipControlsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketPolicyInput(v *GetBucketPolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketPolicyStatusInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketReplicationInput(v *GetBucketReplicationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketReplicationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketRequestPaymentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketTaggingInput(v *GetBucketTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketVersioningInput(v *GetBucketVersioningInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketVersioningInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetBucketWebsiteInput(v *GetBucketWebsiteInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetBucketWebsiteInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectAclInput(v *GetObjectAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectAclInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectAttributesInput(v *GetObjectAttributesInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectAttributesInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.ObjectAttributes == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ObjectAttributes"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectInput(v *GetObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectLegalHoldInput(v *GetObjectLegalHoldInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectLegalHoldInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectLockConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectRetentionInput(v *GetObjectRetentionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectRetentionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectTaggingInput(v *GetObjectTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetObjectTorrentInput(v *GetObjectTorrentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetObjectTorrentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetPublicAccessBlockInput(v *GetPublicAccessBlockInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetPublicAccessBlockInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpHeadBucketInput(v *HeadBucketInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "HeadBucketInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpHeadObjectInput(v *HeadObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "HeadObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketAnalyticsConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketIntelligentTieringConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketInventoryConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListBucketMetricsConfigurationsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListMultipartUploadsInput(v *ListMultipartUploadsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListMultipartUploadsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListObjectsInput(v *ListObjectsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListObjectsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListObjectsV2Input(v *ListObjectsV2Input) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListObjectsV2Input"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListObjectVersionsInput(v *ListObjectVersionsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListObjectVersionsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListPartsInput(v *ListPartsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListPartsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketAccelerateConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.AccelerateConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccelerateConfiguration"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketAclInput(v *PutBucketAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketAclInput"}
+ if v.AccessControlPolicy != nil {
+ if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketAnalyticsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.AnalyticsConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AnalyticsConfiguration"))
+ } else if v.AnalyticsConfiguration != nil {
+ if err := validateAnalyticsConfiguration(v.AnalyticsConfiguration); err != nil {
+ invalidParams.AddNested("AnalyticsConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketCorsInput(v *PutBucketCorsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketCorsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.CORSConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CORSConfiguration"))
+ } else if v.CORSConfiguration != nil {
+ if err := validateCORSConfiguration(v.CORSConfiguration); err != nil {
+ invalidParams.AddNested("CORSConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketEncryptionInput(v *PutBucketEncryptionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketEncryptionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.ServerSideEncryptionConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ServerSideEncryptionConfiguration"))
+ } else if v.ServerSideEncryptionConfiguration != nil {
+ if err := validateServerSideEncryptionConfiguration(v.ServerSideEncryptionConfiguration); err != nil {
+ invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketIntelligentTieringConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.IntelligentTieringConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IntelligentTieringConfiguration"))
+ } else if v.IntelligentTieringConfiguration != nil {
+ if err := validateIntelligentTieringConfiguration(v.IntelligentTieringConfiguration); err != nil {
+ invalidParams.AddNested("IntelligentTieringConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketInventoryConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.InventoryConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("InventoryConfiguration"))
+ } else if v.InventoryConfiguration != nil {
+ if err := validateInventoryConfiguration(v.InventoryConfiguration); err != nil {
+ invalidParams.AddNested("InventoryConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketLifecycleConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.LifecycleConfiguration != nil {
+ if err := validateBucketLifecycleConfiguration(v.LifecycleConfiguration); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketLoggingInput(v *PutBucketLoggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketLoggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.BucketLoggingStatus == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BucketLoggingStatus"))
+ } else if v.BucketLoggingStatus != nil {
+ if err := validateBucketLoggingStatus(v.BucketLoggingStatus); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketMetricsConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Id == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Id"))
+ }
+ if v.MetricsConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("MetricsConfiguration"))
+ } else if v.MetricsConfiguration != nil {
+ if err := validateMetricsConfiguration(v.MetricsConfiguration); err != nil {
+ invalidParams.AddNested("MetricsConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketNotificationConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.NotificationConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("NotificationConfiguration"))
+ } else if v.NotificationConfiguration != nil {
+ if err := validateNotificationConfiguration(v.NotificationConfiguration); err != nil {
+ invalidParams.AddNested("NotificationConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketOwnershipControlsInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.OwnershipControls == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("OwnershipControls"))
+ } else if v.OwnershipControls != nil {
+ if err := validateOwnershipControls(v.OwnershipControls); err != nil {
+ invalidParams.AddNested("OwnershipControls", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketPolicyInput(v *PutBucketPolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketPolicyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Policy == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Policy"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketReplicationInput(v *PutBucketReplicationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketReplicationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.ReplicationConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ReplicationConfiguration"))
+ } else if v.ReplicationConfiguration != nil {
+ if err := validateReplicationConfiguration(v.ReplicationConfiguration); err != nil {
+ invalidParams.AddNested("ReplicationConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketRequestPaymentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.RequestPaymentConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RequestPaymentConfiguration"))
+ } else if v.RequestPaymentConfiguration != nil {
+ if err := validateRequestPaymentConfiguration(v.RequestPaymentConfiguration); err != nil {
+ invalidParams.AddNested("RequestPaymentConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketTaggingInput(v *PutBucketTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Tagging == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tagging"))
+ } else if v.Tagging != nil {
+ if err := validateTagging(v.Tagging); err != nil {
+ invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketVersioningInput(v *PutBucketVersioningInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketVersioningInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.VersioningConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("VersioningConfiguration"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutBucketWebsiteInput(v *PutBucketWebsiteInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutBucketWebsiteInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.WebsiteConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("WebsiteConfiguration"))
+ } else if v.WebsiteConfiguration != nil {
+ if err := validateWebsiteConfiguration(v.WebsiteConfiguration); err != nil {
+ invalidParams.AddNested("WebsiteConfiguration", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectAclInput(v *PutObjectAclInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectAclInput"}
+ if v.AccessControlPolicy != nil {
+ if err := validateAccessControlPolicy(v.AccessControlPolicy); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectInput(v *PutObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectLegalHoldInput(v *PutObjectLegalHoldInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectLegalHoldInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectLockConfigurationInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectRetentionInput(v *PutObjectRetentionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectRetentionInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutObjectTaggingInput(v *PutObjectTaggingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutObjectTaggingInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Tagging == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tagging"))
+ } else if v.Tagging != nil {
+ if err := validateTagging(v.Tagging); err != nil {
+ invalidParams.AddNested("Tagging", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutPublicAccessBlockInput(v *PutPublicAccessBlockInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutPublicAccessBlockInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.PublicAccessBlockConfiguration == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PublicAccessBlockConfiguration"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpRestoreObjectInput(v *RestoreObjectInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RestoreObjectInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.RestoreRequest != nil {
+ if err := validateRestoreRequest(v.RestoreRequest); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpSelectObjectContentInput(v *SelectObjectContentInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "SelectObjectContentInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Expression == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Expression"))
+ }
+ if len(v.ExpressionType) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("ExpressionType"))
+ }
+ if v.InputSerialization == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("InputSerialization"))
+ }
+ if v.OutputSerialization == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("OutputSerialization"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UploadPartCopyInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.CopySource == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("CopySource"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.PartNumber == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PartNumber"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUploadPartInput(v *UploadPartInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UploadPartInput"}
+ if v.Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Bucket"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.PartNumber == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PartNumber"))
+ }
+ if v.UploadId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UploadId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpWriteGetObjectResponseInput(v *WriteGetObjectResponseInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "WriteGetObjectResponseInput"}
+ if v.RequestRoute == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RequestRoute"))
+ }
+ if v.RequestToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RequestToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
new file mode 100644
index 000000000..4cb0c9100
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -0,0 +1,694 @@
+# v1.30.1 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.30.0 (2025-10-30)
+
+* **Feature**: Update endpoint ruleset parameters casing
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.8 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.7 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.6 (2025-09-29)
+
+* No change notes available for this release.
+
+# v1.29.5 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.4 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.3 (2025-09-10)
+
+* No change notes available for this release.
+
+# v1.29.2 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.1 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2025-08-28)
+
+* **Feature**: Remove incorrect endpoint tests
+
+# v1.28.3 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2025-08-20)
+
+* **Bug Fix**: Remove unused deserialization code.
+
+# v1.28.0 (2025-08-11)
+
+* **Feature**: Add support for configuring per-service Options via callback on global config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2025-08-04)
+
+* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.4 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.25.2 (2025-03-25)
+
+* No change notes available for this release.
+
+# v1.25.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.25.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.16 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.15 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.14 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.13 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.12 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.24.11 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.24.10 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.9 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.8 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.7 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.6 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.5 (2024-11-07)
+
+* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses
+
+# v1.24.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.23.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.23.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.23.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.23.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.8 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.22.7 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.22.6 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.5 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.4 (2024-07-18)
+
+* No change notes available for this release.
+
+# v1.22.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.21.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.11 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.9 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.20.8 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.7 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.6 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.20.5 (2024-04-05)
+
+* No change notes available for this release.
+
+# v1.20.4 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.19.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.19.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2024-01-18)
+
+* No change notes available for this release.
+
+# v1.18.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.18.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.18.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.17.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-10-02)
+
+* **Feature**: Fix FIPS Endpoints in aws-us-gov.
+
+# v1.14.1 (2023-09-22)
+
+* No change notes available for this release.
+
+# v1.14.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removal of non-unique regional endpoints, fixes for FIPS and DualStack endpoints, and region no longer being required in SDK::Endpoint. Includes an additional breakfix to the cognito-sync field.
+
+# v1.13.6 (2023-08-31)
+
+* No change notes available for this release.
+
+# v1.13.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.13.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK, which will supersede and deprecate the existing endpoint resolution. Specifically, EndpointResolver will be deprecated, with BaseEndpoint and EndpointResolverV2 taking its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.12.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.12.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.12.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.12.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.12.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.11.28 (2022-12-20)
+
+* No change notes available for this release.
+
+# v1.11.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.23 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.22 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.21 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2022-08-30)
+
+* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
+
+# v1.11.18 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2022-08-15)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# v1.11.16 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2022-08-08)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2022-07-11)
+
+* No change notes available for this release.
+
+# v1.11.11 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-06-16)
+
+* No change notes available for this release.
+
+# v1.11.8 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-05-26)
+
+* No change notes available for this release.
+
+# v1.11.6 (2022-05-25)
+
+* No change notes available for this release.
+
+# v1.11.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode, `Adaptive`: an experimental mode that applies client-side rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: Updated API models
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+
+# v1.6.2 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Feature**: Updated service to latest API model.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
new file mode 100644
index 000000000..2c498e468
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
@@ -0,0 +1,1019 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithydocument "github.com/aws/smithy-go/document"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net"
+ "net/http"
+ "sync/atomic"
+ "time"
+)
+
+const ServiceID = "SSO"
+const ServiceAPIVersion = "2019-06-10"
+
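+// operationMetrics bundles the duration histograms recorded for each phase of
+// a client call (serialize, resolve identity, resolve endpoint, sign,
+// deserialize) alongside the overall call duration.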
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
+}
+
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
+
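+// timeOperationMetric invokes fn, records the elapsed wall-clock time in
+// seconds on the named histogram, and passes fn's result and error through
+// unchanged.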
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
+
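+// startMetricTimer returns a stop function that records the elapsed time on
+// the named histogram; calling the stop function more than once is a no-op.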
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
+
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ }
+}
+
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
+
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
+}
+
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
+
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
+
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso")
+}
+
+// Client provides the API client to make operations call for AWS Single Sign-On.
+type Client struct {
+ options Options
+
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ setResolvedDefaultsMode(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveEndpointResolverV2(&options)
+
+ resolveTracerProvider(&options)
+
+ resolveMeterProvider(&options)
+
+ resolveAuthSchemeResolver(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
+ }
+
+ initializeTimeOffsetResolver(client)
+
+ return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
+}
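+
+// A minimal sketch of such a per-operation override, assuming client was
+// constructed earlier and token holds a valid SSO access token:
+//
+//	out, err := client.ListAccounts(ctx, &sso.ListAccountsInput{
+//		AccessToken: aws.String(token),
+//	}, func(o *sso.Options) { o.RetryMaxAttempts = 5 })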
+
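+// invokeOperation assembles the middleware stack for a single call, applies
+// per-operation option overrides, and runs the request inside a tracing span
+// while timing it with the client.call.duration metric.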
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeOperationRetryMaxAttempts(&options, *c)
+
+ finalizeClientEndpointResolverOptions(&options)
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
+ if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
+ return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
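+// addProtocolFinalizerMiddlewares registers the finalize-phase steps in their
+// required order: ResolveAuthScheme, then GetIdentity, then ResolveEndpointV2,
+// then Signing.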
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+	return next.HandleInitialize(ctx, in)
+}
+
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+ if len(o.resolvedDefaultsMode) > 0 {
+ return
+ }
+
+ var mode aws.DefaultsMode
+ mode.SetFromString(string(o.DefaultsMode))
+
+ if mode == aws.DefaultsModeAuto {
+ mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+ }
+
+ o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ DefaultsMode: cfg.DefaultsMode,
+ RuntimeEnvironment: cfg.RuntimeEnvironment,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
+ AuthSchemePreference: cfg.AuthSchemePreference,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSRetryMaxAttempts(cfg, &opts)
+ resolveAWSRetryMode(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ resolveInterceptors(cfg, &opts)
+ resolveUseDualStackEndpoint(cfg, &opts)
+ resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
+ return New(opts, func(o *Options) {
+ for _, opt := range cfg.ServiceOptions {
+ opt(ServiceID, o)
+ }
+ for _, opt := range optFns {
+ opt(o)
+ }
+ })
+}
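+
+// A minimal construction sketch, assuming the aws-sdk-go-v2 config module is
+// imported as config and ctx is an existing context.Context:
+//
+//	cfg, err := config.LoadDefaultConfig(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	client := sso.NewFromConfig(cfg)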
+
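+// resolveHTTPClient applies defaults-mode connect and TLS handshake timeouts,
+// but only when the HTTP client is the SDK's BuildableClient (or unset);
+// custom HTTP client implementations are left untouched.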
+func resolveHTTPClient(o *Options) {
+ var buildable *awshttp.BuildableClient
+
+ if o.HTTPClient != nil {
+ var ok bool
+ buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return
+ }
+ } else {
+ buildable = awshttp.NewBuildableClient()
+ }
+
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+ if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+ dialer.Timeout = dialerTimeout
+ }
+ })
+
+ buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+ if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+ transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+ }
+ })
+ }
+
+ o.HTTPClient = buildable
+}
+
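+// resolveRetryer selects a default retryer when none was configured: adaptive
+// mode when RetryMode resolves to adaptive, otherwise the standard retryer,
+// honoring a non-zero RetryMaxAttempts in either case.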
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+
+ if len(o.RetryMode) == 0 {
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ o.RetryMode = modeConfig.RetryMode
+ }
+ }
+ if len(o.RetryMode) == 0 {
+ o.RetryMode = aws.RetryModeStandard
+ }
+
+ var standardOptions []func(*retry.StandardOptions)
+ if v := o.RetryMaxAttempts; v != 0 {
+ standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+ so.MaxAttempts = v
+ })
+ }
+
+ switch o.RetryMode {
+ case aws.RetryModeAdaptive:
+ var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+ if len(standardOptions) != 0 {
+ adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+ ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+ })
+ }
+ o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+ default:
+ o.Retryer = retry.NewStandard(standardOptions...)
+ }
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+ if len(cfg.RetryMode) == 0 {
+ return
+ }
+ o.RetryMode = cfg.RetryMode
+}
+
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+ if cfg.RetryMaxAttempts == 0 {
+ return
+ }
+ o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
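+// finalizeOperationRetryMaxAttempts re-wraps the retryer only when a per-call
+// RetryMaxAttempts override differs from the client-level value.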
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+ if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func resolveInterceptors(cfg aws.Config, o *Options) {
+ o.Interceptors = cfg.Interceptors.Copy()
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
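+// HTTPSignerV4 abstracts SigV4 request signing so a custom signer can be
+// supplied through Options.HTTPSignerV4.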
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
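+// addRetry installs the attempt loop ahead of auth scheme resolution so each
+// retry re-resolves identity and re-signs the request, then adds the retry
+// metrics header middleware after it.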
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseDualStackEndpoint = value
+ }
+ return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseFIPSEndpoint = value
+ }
+ return nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
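+// addTimeOffsetBuild wires the clock-skew middleware into both the build and
+// deserialize phases so the shared offset can be applied to outgoing requests
+// and updated from server responses.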
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{
+ Interceptors: opts.Interceptors.BeforeRetryLoop,
+ }, "Retry", middleware.Before)
+}
+
+func addInterceptAttempt(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{
+ BeforeAttempt: opts.Interceptors.BeforeAttempt,
+ AfterAttempt: opts.Interceptors.AfterAttempt,
+ }, "Retry", middleware.After)
+}
+
+func addInterceptExecution(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&smithyhttp.InterceptExecution{
+ BeforeExecution: opts.Interceptors.BeforeExecution,
+ AfterExecution: opts.Interceptors.AfterExecution,
+ }, middleware.Before)
+}
+
+func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{
+ Interceptors: opts.Interceptors.BeforeSerialization,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{
+ Interceptors: opts.Interceptors.AfterSerialization,
+ }, "OperationSerializer", middleware.After)
+}
+
+func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{
+ Interceptors: opts.Interceptors.BeforeSigning,
+ }, "Signing", middleware.Before)
+}
+
+func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{
+ Interceptors: opts.Interceptors.AfterSigning,
+ }, "Signing", middleware.After)
+}
+
+func addInterceptTransmit(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{
+ BeforeTransmit: opts.Interceptors.BeforeTransmit,
+ AfterTransmit: opts.Interceptors.AfterTransmit,
+ }, middleware.After)
+}
+
+func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{
+ Interceptors: opts.Interceptors.BeforeDeserialization,
+ }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse)
+}
+
+func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{
+ Interceptors: opts.Interceptors.AfterDeserialization,
+ }, "OperationDeserializer", middleware.Before)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
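
The helpers above only report and wire behavior that is configured on Options; addUserAgentRetryMode, for instance, inspects the concrete Retryer type to tag the user agent. A minimal sketch, assuming the standard retry package and the generated sso.New constructor (neither is shown in this diff), of opting into the adaptive mode that the *retry.AdaptiveMode case matches:

package example

import (
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// newAdaptiveClient builds an SSO client whose retryer is adaptive, so the
// addUserAgentRetryMode middleware above tags requests with the adaptive
// retry-mode user-agent feature.
func newAdaptiveClient(opts sso.Options) *sso.Client {
	opts.Retryer = retry.NewAdaptiveMode()
	return sso.New(opts)
}
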
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
new file mode 100644
index 000000000..df5dc1674
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
@@ -0,0 +1,201 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the STS short-term credentials for a given role name that is assigned
+// to the user.
+func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) {
+ if params == nil {
+ params = &GetRoleCredentialsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetRoleCredentialsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetRoleCredentialsInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ //
+ // This member is required.
+ AccessToken *string
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // This member is required.
+ AccountId *string
+
+ // The friendly name of the role that is assigned to the user.
+ //
+ // This member is required.
+ RoleName *string
+
+ noSmithyDocumentSerde
+}
+
+type GetRoleCredentialsOutput struct {
+
+ // The credentials for the role that is assigned to the user.
+ RoleCredentials *types.RoleCredentials
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetRoleCredentials",
+ }
+}
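
As a usage sketch (not part of the vendored file): the three required members documented above are the only inputs. Client construction via config.LoadDefaultConfig and sso.NewFromConfig follows the usual SDK v2 pattern; both live outside this diff, so treat that scaffolding as assumed.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// getRoleCredentials fetches short-term credentials for one account/role pair.
// accessToken must come from a prior CreateToken call (see the field docs above).
func getRoleCredentials(ctx context.Context, accessToken, accountID, roleName string) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := sso.NewFromConfig(cfg)

	out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
		AccessToken: aws.String(accessToken),
		AccountId:   aws.String(accountID),
		RoleName:    aws.String(roleName),
	})
	if err != nil {
		return err
	}
	_ = out.RoleCredentials // *types.RoleCredentials; see the types package for its fields
	return nil
}
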
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
new file mode 100644
index 000000000..2a3b2ad90
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
@@ -0,0 +1,299 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all roles that are assigned to the user for a given AWS account.
+func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) {
+ if params == nil {
+ params = &ListAccountRolesInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListAccountRolesOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListAccountRolesInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ //
+ // This member is required.
+ AccessToken *string
+
+ // The identifier for the AWS account that is assigned to the user.
+ //
+ // This member is required.
+ AccountId *string
+
+ // The number of items that clients can request per page.
+ MaxResults *int32
+
+ // The page token from the previous response output when you request subsequent
+ // pages.
+ NextToken *string
+
+ noSmithyDocumentSerde
+}
+
+type ListAccountRolesOutput struct {
+
+	// The page token the client uses to retrieve the next page of results.
+ NextToken *string
+
+ // A paginated response with the list of roles and the next token if more results
+ // are available.
+ RoleList []types.RoleInfo
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListAccountRolesValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles
+type ListAccountRolesPaginatorOptions struct {
+ // The number of items that clients can request per page.
+ Limit int32
+
+	// Set to true to stop pagination when the service returns a pagination token
+	// that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListAccountRolesPaginator is a paginator for ListAccountRoles
+type ListAccountRolesPaginator struct {
+ options ListAccountRolesPaginatorOptions
+ client ListAccountRolesAPIClient
+ params *ListAccountRolesInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator
+func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator {
+ if params == nil {
+ params = &ListAccountRolesInput{}
+ }
+
+ options := ListAccountRolesPaginatorOptions{}
+ if params.MaxResults != nil {
+ options.Limit = *params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListAccountRolesPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.NextToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountRolesPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListAccountRoles page.
+func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxResults = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+	result, err := p.client.ListAccountRoles(ctx, &params, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListAccountRolesAPIClient is a client that implements the ListAccountRoles
+// operation.
+type ListAccountRolesAPIClient interface {
+ ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error)
+}
+
+var _ ListAccountRolesAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListAccountRoles",
+ }
+}
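
A short sketch of driving the paginator defined above; everything used here (NewListAccountRolesPaginator, HasMorePages, NextPage, the Limit option) appears verbatim in this file, so only the surrounding scaffolding is assumed.

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// listAllRoles walks every page of ListAccountRoles for one account.
func listAllRoles(ctx context.Context, client *sso.Client, accessToken, accountID string) error {
	p := sso.NewListAccountRolesPaginator(client, &sso.ListAccountRolesInput{
		AccessToken: aws.String(accessToken),
		AccountId:   aws.String(accountID),
	}, func(o *sso.ListAccountRolesPaginatorOptions) {
		o.Limit = 50 // forwarded to MaxResults on each request
	})

	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, role := range page.RoleList {
			fmt.Println(aws.ToString(role.RoleName))
		}
	}
	return nil
}
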
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
new file mode 100644
index 000000000..f6114a7c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
@@ -0,0 +1,297 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by
+// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity
+// Center User Guide. This operation returns a paginated response.
+//
+// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers
+func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+ if params == nil {
+ params = &ListAccountsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListAccountsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListAccountsInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ //
+ // This member is required.
+ AccessToken *string
+
+ // This is the number of items clients can request per page.
+ MaxResults *int32
+
+ // (Optional) When requesting subsequent pages, this is the page token from the
+ // previous response output.
+ NextToken *string
+
+ noSmithyDocumentSerde
+}
+
+type ListAccountsOutput struct {
+
+ // A paginated response with the list of account information and the next token if
+ // more results are available.
+ AccountList []types.AccountInfo
+
+	// The page token the client uses to retrieve the list of accounts.
+ NextToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListAccountsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListAccountsPaginatorOptions is the paginator options for ListAccounts
+type ListAccountsPaginatorOptions struct {
+ // This is the number of items clients can request per page.
+ Limit int32
+
+	// Set to true to stop pagination when the service returns a pagination token
+	// that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListAccountsPaginator is a paginator for ListAccounts
+type ListAccountsPaginator struct {
+ options ListAccountsPaginatorOptions
+ client ListAccountsAPIClient
+ params *ListAccountsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListAccountsPaginator returns a new ListAccountsPaginator
+func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator {
+ if params == nil {
+ params = &ListAccountsInput{}
+ }
+
+ options := ListAccountsPaginatorOptions{}
+ if params.MaxResults != nil {
+ options.Limit = *params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListAccountsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.NextToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListAccountsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListAccounts page.
+func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxResults = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+	result, err := p.client.ListAccounts(ctx, &params, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListAccountsAPIClient is a client that implements the ListAccounts operation.
+type ListAccountsAPIClient interface {
+ ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error)
+}
+
+var _ ListAccountsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListAccounts",
+ }
+}
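
The ListAccountsAPIClient interface above exists so the paginator can be fed something other than the concrete *Client; a minimal test double might look like the following (the fake type and its canned page are, of course, hypothetical).

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// fakeSSO satisfies sso.ListAccountsAPIClient with a single canned page,
// which lets paginator-driven code be unit-tested without network access.
type fakeSSO struct{}

func (fakeSSO) ListAccounts(ctx context.Context, in *sso.ListAccountsInput, optFns ...func(*sso.Options)) (*sso.ListAccountsOutput, error) {
	return &sso.ListAccountsOutput{
		AccountList: []types.AccountInfo{{}}, // one empty AccountInfo; nil NextToken ends pagination
	}, nil
}

var _ sso.ListAccountsAPIClient = fakeSSO{}
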
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
new file mode 100644
index 000000000..2c7f181c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
@@ -0,0 +1,200 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the locally stored SSO tokens from the client-side cache and sends an
+// API call to the IAM Identity Center service to invalidate the corresponding
+// server-side IAM Identity Center sign in session.
+//
+// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM
+// Identity Center sign in session is used to obtain an IAM session, as specified
+// in the corresponding IAM Identity Center permission set. More specifically, IAM
+// Identity Center assumes an IAM role in the target account on behalf of the user,
+// and the corresponding temporary AWS credentials are returned to the client.
+//
+// After user logout, any existing IAM role sessions that were created by using
+// IAM Identity Center permission sets continue based on the duration configured in
+// the permission set. For more information, see [User authentications]in the IAM Identity Center User
+// Guide.
+//
+// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html
+func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) {
+ if params == nil {
+ params = &LogoutInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*LogoutOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type LogoutInput struct {
+
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
+ //
+ // This member is required.
+ AccessToken *string
+
+ noSmithyDocumentSerde
+}
+
+type LogoutOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpLogoutValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "Logout",
+ }
+}
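
Calling the operation is symmetric with the others in this package; a one-function sketch, assuming a constructed client and a valid access token:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

// logout invalidates the server-side sign-in session for the given token.
func logout(ctx context.Context, client *sso.Client, accessToken string) error {
	_, err := client.Logout(ctx, &sso.LogoutInput{AccessToken: aws.String(accessToken)})
	return err
}
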
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
new file mode 100644
index 000000000..708e53c5a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
@@ -0,0 +1,363 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "slices"
+ "strings"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "Logout": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "awsssoportal")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ sorted := sortAuthOptions(options, m.options.AuthSchemePreference)
+ for _, option := range sorted {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option {
+ byPriority := make([]*smithyauth.Option, 0, len(options))
+ for _, prefName := range preferred {
+ for _, option := range options {
+ optName := option.SchemeID
+ if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 {
+ optName = parts[1]
+ }
+ if prefName == optName {
+ byPriority = append(byPriority, option)
+ }
+ }
+ }
+ for _, option := range options {
+ if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool {
+ return o.SchemeID == option.SchemeID
+ }) {
+ byPriority = append(byPriority, option)
+ }
+ }
+ return byPriority
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
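
Since AuthSchemeResolver is exported, callers can replace the default resolution shown above. Note that wrapWithAnonymousAuth only augments the default resolver, so a custom resolver must list every scheme it wants considered. A sketch that forces anonymous auth for all operations, mirroring what operationAuthOptions already does per-operation:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/sso"
	smithyauth "github.com/aws/smithy-go/auth"
)

// anonymousOnly resolves every operation to the anonymous scheme, which
// resolveAuthSchemeMiddleware selects without consulting identity resolvers.
type anonymousOnly struct{}

func (anonymousOnly) ResolveAuthSchemes(ctx context.Context, params *sso.AuthResolverParameters) ([]*smithyauth.Option, error) {
	return []*smithyauth.Option{{SchemeID: smithyauth.SchemeIDAnonymous}}, nil
}

var _ sso.AuthSchemeResolver = anonymousOnly{}

func newAnonymousClient(opts sso.Options) *sso.Client {
	opts.AuthSchemeResolver = anonymousOnly{}
	return sso.New(opts)
}
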
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
new file mode 100644
index 000000000..a889f3c7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
@@ -0,0 +1,1172 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
+ "github.com/aws/aws-sdk-go-v2/service/sso/types"
+ smithy "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+type awsRestjson1_deserializeOpGetRoleCredentials struct {
+}
+
+func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata)
+ }
+ output := &GetRoleCredentialsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *GetRoleCredentialsOutput
+ if *v == nil {
+ sv = &GetRoleCredentialsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "roleCredentials":
+ if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
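+
+// Illustrative only (not generated code): callers typically branch on the
+// modeled errors produced by the switch above using errors.As, for example:
+//
+//	var unauth *types.UnauthorizedException
+//	if errors.As(err, &unauth) {
+//		// access token expired or invalid; re-authenticate
+//	}
+//
+// Error codes with no modeled type fall through to *smithy.GenericAPIError.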
+
+type awsRestjson1_deserializeOpListAccountRoles struct {
+}
+
+func (*awsRestjson1_deserializeOpListAccountRoles) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata)
+ }
+ output := &ListAccountRolesOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListAccountRolesOutput
+ if *v == nil {
+ sv = &ListAccountRolesOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "nextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ case "roleList":
+ if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpListAccounts struct {
+}
+
+func (*awsRestjson1_deserializeOpListAccounts) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata)
+ }
+ output := &ListAccountsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListAccountsOutput
+ if *v == nil {
+ sv = &ListAccountsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accountList":
+ if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil {
+ return err
+ }
+
+ case "nextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpLogout struct {
+}
+
+func (*awsRestjson1_deserializeOpLogout) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata)
+ }
+ output := &LogoutOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("TooManyRequestsException", errorCode):
+ return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRequestException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ResourceNotFoundException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.TooManyRequestsException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.UnauthorizedException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AccountInfo
+ if *v == nil {
+ sv = &types.AccountInfo{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accountId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value)
+ }
+ sv.AccountId = ptr.String(jtv)
+ }
+
+ case "accountName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value)
+ }
+ sv.AccountName = ptr.String(jtv)
+ }
+
+ case "emailAddress":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value)
+ }
+ sv.EmailAddress = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.AccountInfo
+ if *v == nil {
+ cv = []types.AccountInfo{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.AccountInfo
+ destAddr := &col
+ if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRequestException
+ if *v == nil {
+ sv = &types.InvalidRequestException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ResourceNotFoundException
+ if *v == nil {
+ sv = &types.ResourceNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.RoleCredentials
+ if *v == nil {
+ sv = &types.RoleCredentials{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accessKeyId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value)
+ }
+ sv.AccessKeyId = ptr.String(jtv)
+ }
+
+ case "expiration":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.Expiration = i64
+ }
+
+ case "secretAccessKey":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value)
+ }
+ sv.SecretAccessKey = ptr.String(jtv)
+ }
+
+ case "sessionToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value)
+ }
+ sv.SessionToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.RoleInfo
+ if *v == nil {
+ sv = &types.RoleInfo{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accountId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value)
+ }
+ sv.AccountId = ptr.String(jtv)
+ }
+
+ case "roleName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value)
+ }
+ sv.RoleName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.RoleInfo
+ if *v == nil {
+ cv = []types.RoleInfo{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.RoleInfo
+ destAddr := &col
+ if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TooManyRequestsException
+ if *v == nil {
+ sv = &types.TooManyRequestsException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.UnauthorizedException
+ if *v == nil {
+ sv = &types.UnauthorizedException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
new file mode 100644
index 000000000..7f6e429fd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
@@ -0,0 +1,27 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sso provides the API client, operations, and parameter types for AWS
+// Single Sign-On.
+//
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity Center
+// resources such as the AWS access portal. Users can get AWS account applications
+// and roles assigned to them and get federated into the application.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API
+// namespaces will continue to retain their original name for backward
+// compatibility purposes. For more information, see [IAM Identity Center rename].
+//
+// This reference guide describes the IAM Identity Center Portal operations that
+// you can call programmatically and includes detailed information on data types and
+// errors.
+//
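+// A minimal client-construction sketch (illustration only; it assumes the
+// companion config module, github.com/aws/aws-sdk-go-v2/config, is available):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		// handle error
+//	}
+//	client := sso.NewFromConfig(cfg)
+//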
+// AWS provides SDKs that consist of libraries and sample code for various
+// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android.
+// The SDKs provide a convenient way to create programmatic access to IAM Identity
+// Center and other AWS services. For more information about the AWS SDKs,
+// including how to download and install them, see [Tools for Amazon Web Services].
+//
+// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/
+// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
new file mode 100644
index 000000000..2b22ab779
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
@@ -0,0 +1,558 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
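+
+// A hedged usage sketch for EndpointResolverFunc and EndpointResolverFromURL
+// (illustration only; the URL below is hypothetical): stub every endpoint with
+// a fixed URL and override the signing region via the functional option.
+//
+//	resolver := EndpointResolverFromURL("https://portal.sso.example.test",
+//		func(e *aws.Endpoint) { e.SigningRegion = "us-east-1" },
+//	)
+//	client := New(Options{Region: "us-east-1", EndpointResolver: resolver})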
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ eo := m.Options
+ eo.Logger = middleware.GetLogger(ctx)
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+ if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "awsssoportal"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+ var resolver aws.EndpointResolverWithOptions
+
+ if awsResolverWithOptions != nil {
+ resolver = awsResolverWithOptions
+ } else if awsResolver != nil {
+ resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+ }
+
+ return &wrappedEndpointResolver{
+ awsResolver: resolver,
+ }
+}
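+
+// In brief (illustration of the fallback flow implemented above): the legacy
+// ResolveEndpoint serialize middleware runs first; when the wrapped resolver
+// returns aws.EndpointNotFoundError, it clears the requires-legacy-endpoints
+// flag on the context so that resolveEndpointV2Middleware performs resolution
+// via EndpointResolverV2 instead.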
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+ options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+ if len(options.EndpointOptions.ResolvedRegion) == 0 {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(options.Region, fipsInfix) ||
+ strings.Contains(options.Region, fipsPrefix) ||
+ strings.Contains(options.Region, fipsSuffix) {
+ options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+ options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+ options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ }
+ }
+
+}
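+
+// Illustration: the normalization above maps a FIPS pseudo-region such as
+// "fips-us-east-1" or "us-east-1-fips" to ResolvedRegion "us-east-1" and sets
+// UseFIPSEndpoint to aws.FIPSEndpointStateEnabled, so the endpoint rules see a
+// plain region plus an explicit FIPS flag.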
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
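+
+// A minimal custom EndpointResolverV2 sketch (illustration only; the name
+// staticResolverV2 is hypothetical): always resolve to a single fixed URI.
+//
+//	type staticResolverV2 struct{ uri url.URL }
+//
+//	func (s staticResolverV2) ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+//		smithyendpoints.Endpoint, error,
+//	) {
+//		return smithyendpoints.Endpoint{URI: s.uri}, nil
+//	}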
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _ = _UseDualStack
+ _UseFIPS := *params.UseFIPS
+ _ = _UseFIPS
+
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.Name == "aws-us-gov" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
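+
+// Binding order (illustration): client-level options populate the parameters
+// first; an operation input that implements endpointParamsBinder can then
+// override individual fields before resolution.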
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
new file mode 100644
index 000000000..1499c0a95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
@@ -0,0 +1,36 @@
+{
+ "dependencies": {
+ "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+ "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+ "github.com/aws/smithy-go": "v1.4.0"
+ },
+ "files": [
+ "api_client.go",
+ "api_client_test.go",
+ "api_op_GetRoleCredentials.go",
+ "api_op_ListAccountRoles.go",
+ "api_op_ListAccounts.go",
+ "api_op_Logout.go",
+ "auth.go",
+ "deserializers.go",
+ "doc.go",
+ "endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
+ "generated.json",
+ "internal/endpoints/endpoints.go",
+ "internal/endpoints/endpoints_test.go",
+ "options.go",
+ "protocol_test.go",
+ "serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
+ "types/errors.go",
+ "types/types.go",
+ "validators.go"
+ ],
+ "go": "1.23",
+ "module": "github.com/aws/aws-sdk-go-v2/service/sso",
+ "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
new file mode 100644
index 000000000..c84f88075
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sso
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.30.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..8bb8730be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
@@ -0,0 +1,603 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+ "github.com/aws/smithy-go/logging"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the
+ // provided logger.
+ LogDeprecated bool
+
+ // ResolvedRegion is used to override the region to be resolved, rather than
+ // using the value passed to the ResolveEndpoint method. This value is used by the
+ // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+ // name. You must not set this value directly in your application.
+ ResolvedRegion string
+
+ // DisableHTTPS informs the resolver to return an endpoint that does not use the
+ // HTTPS scheme.
+ DisableHTTPS bool
+
+ // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+ return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+ return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+ return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+ return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+ return endpoints.Options{
+ Logger: options.Logger,
+ LogDeprecated: options.LogDeprecated,
+ ResolvedRegion: options.ResolvedRegion,
+ DisableHTTPS: options.DisableHTTPS,
+ UseDualStackEndpoint: options.UseDualStackEndpoint,
+ UseFIPSEndpoint: options.UseFIPSEndpoint,
+ }
+}
+
+// Resolver is the SSO endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := transformToSharedOptions(options)
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
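+
+// Usage sketch (illustration only): resolve the default endpoint for a region.
+//
+//	r := New()
+//	ep, err := r.ResolveEndpoint("us-east-1", Options{})
+//	// With the partition data below, ep.URL would be
+//	// "https://portal.sso.us-east-1.amazonaws.com".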
+
+var partitionRegexp = struct {
+ Aws *regexp.Regexp
+ AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
+ AwsIso *regexp.Regexp
+ AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
+ AwsUsGov *regexp.Regexp
+}{
+
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
+ AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
+ AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+ AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "portal.sso.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.Aws,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.af-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-northeast-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-northeast-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-3",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-northeast-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-5.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-5",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ca-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-central-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-north-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-north-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-3",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-west-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.il-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.me-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.me-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "sa-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.sa-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsCn,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIso,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoB,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "portal.sso.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsUsGov,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-gov-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+}
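
For orientation, the partition entries above only carry hostname templates; the SDK's internal endpoints package substitutes the caller's region into the `{region}` placeholder at resolution time. A minimal, self-contained sketch of that substitution (illustrative only, not the SDK's actual resolver API):

```go
package main

import (
	"fmt"
	"strings"
)

// expandHostname fills a concrete region into one of the partition
// hostname templates above. The real resolution also applies the
// partition's RegionRegex and variant (FIPS/dual-stack) selection.
func expandHostname(template, region string) string {
	return strings.ReplaceAll(template, "{region}", region)
}

func main() {
	// FIPS variant in the aws-us-gov partition:
	fmt.Println(expandHostname("portal.sso-fips.{region}.amazonaws.com", "us-gov-west-1"))
	// portal.sso-fips.us-gov-west-1.amazonaws.com
}
```
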
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
new file mode 100644
index 000000000..277550af4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
@@ -0,0 +1,239 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on the operation
+ // call to modify this list for per-operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application-specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // client's initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+ // Deprecated: EndpointResolver and WithEndpointResolver are deprecated. Providing
+ // a value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // RetryMaxAttempts specifies the maximum number of attempts an API client will
+ // make when calling an operation that fails with a retryable error. A value of 0
+ // is ignored: it will not be used to configure the API client's default retryer,
+ // nor modify a per-operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry mode the API client will be created with, if
+ // the Retryer option is not also specified.
+ //
+ // When creating a new API client this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently this does not support per-operation call overrides, but it may in
+ // the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+ // The initial DefaultsMode used when the client options were constructed. If the
+ // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+ // value was at that point in time.
+ //
+ // Currently this does not support per-operation call overrides, but it may in
+ // the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // Client registry of operation interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ to.Interceptors = o.Interceptors.Copy()
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver are deprecated. Providing
+// a value for this field will likely prevent you from using any endpoint-related
+// service features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
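
The Options struct above is rarely filled in by hand; it is resolved from shared configuration and then adjusted through functional options such as the `With*` helpers in this file. A minimal sketch of typical construction, assuming shared-config credentials are available in the environment:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Each functional option mutates a copy of the resolved Options,
	// so per-client tweaks never leak back into the shared config.
	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.Region = "us-east-1"
		o.RetryMaxAttempts = 5
	})
	_ = client
}
```
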
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
new file mode 100644
index 000000000..a7a5b57de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
@@ -0,0 +1,309 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type awsRestjson1_serializeOpGetRoleCredentials struct {
+}
+
+func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetRoleCredentialsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/federation/credentials")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ if v.AccountId != nil {
+ encoder.SetQuery("account_id").String(*v.AccountId)
+ }
+
+ if v.RoleName != nil {
+ encoder.SetQuery("role_name").String(*v.RoleName)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpListAccountRoles struct {
+}
+
+func (*awsRestjson1_serializeOpListAccountRoles) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListAccountRolesInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/assignment/roles")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ if v.AccountId != nil {
+ encoder.SetQuery("account_id").String(*v.AccountId)
+ }
+
+ if v.MaxResults != nil {
+ encoder.SetQuery("max_result").Integer(*v.MaxResults)
+ }
+
+ if v.NextToken != nil {
+ encoder.SetQuery("next_token").String(*v.NextToken)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpListAccounts struct {
+}
+
+func (*awsRestjson1_serializeOpListAccounts) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ListAccountsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/assignment/accounts")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "GET"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ if v.MaxResults != nil {
+ encoder.SetQuery("max_result").Integer(*v.MaxResults)
+ }
+
+ if v.NextToken != nil {
+ encoder.SetQuery("next_token").String(*v.NextToken)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpLogout struct {
+}
+
+func (*awsRestjson1_serializeOpLogout) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*LogoutInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/logout")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ if v.AccessToken != nil {
+ locationName := "X-Amz-Sso_bearer_token"
+ encoder.SetHeader(locationName).String(*v.AccessToken)
+ }
+
+ return nil
+}
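
Each serializer above binds the operation input to REST-JSON HTTP locations: the access token always travels in the `X-Amz-Sso_bearer_token` header, while the remaining members become query parameters. An illustrative reconstruction of the request shape `GetRoleCredentials` produces, using only `net/http` (the account ID, role name, and token are placeholder values):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	token := "example-access-token" // placeholder

	q := url.Values{}
	q.Set("account_id", "123456789012")
	q.Set("role_name", "ReadOnly")

	req, err := http.NewRequest(http.MethodGet,
		"https://portal.sso.us-east-1.amazonaws.com/federation/credentials?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Matches the serializer's locationName for AccessToken above.
	req.Header.Set("X-Amz-Sso_bearer_token", token)

	fmt.Println(req.Method, req.URL.String())
}
```
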
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
new file mode 100644
index 000000000..e97a126e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
@@ -0,0 +1,115 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// Indicates that a problem occurred with the input to the request. For example, a
+// required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRequestException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequestException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified resource doesn't exist.
+type ResourceNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ResourceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ResourceNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ResourceNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ResourceNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the request is being made too frequently and is more than what
+// the server can handle.
+type TooManyRequestsException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TooManyRequestsException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TooManyRequestsException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TooManyRequestsException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TooManyRequestsException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the request is not authorized. This can happen due to an invalid
+// access token in the request.
+type UnauthorizedException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *UnauthorizedException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnauthorizedException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *UnauthorizedException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "UnauthorizedException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
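
All four error types implement `smithy.APIError` through the `ErrorCode`, `ErrorMessage`, and `ErrorFault` methods above, so callers can match them in a wrapped error chain with `errors.As`. A short sketch (the token value is a placeholder):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg)

	_, err = client.ListAccounts(context.TODO(), &sso.ListAccountsInput{
		AccessToken: aws.String("expired-token"), // placeholder
	})

	// UnauthorizedException signals an invalid or expired access token.
	var ue *types.UnauthorizedException
	if errors.As(err, &ue) {
		fmt.Println("re-authenticate:", ue.ErrorMessage())
	}
}
```
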
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
new file mode 100644
index 000000000..07ac468e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
@@ -0,0 +1,63 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ smithydocument "github.com/aws/smithy-go/document"
+)
+
+// Provides information about your AWS account.
+type AccountInfo struct {
+
+ // The identifier of the AWS account that is assigned to the user.
+ AccountId *string
+
+ // The display name of the AWS account that is assigned to the user.
+ AccountName *string
+
+ // The email address of the AWS account that is assigned to the user.
+ EmailAddress *string
+
+ noSmithyDocumentSerde
+}
+
+// Provides information about the role credentials that are assigned to the user.
+type RoleCredentials struct {
+
+ // The identifier used for the temporary security credentials. For more
+ // information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+ AccessKeyId *string
+
+ // The date on which temporary security credentials expire.
+ Expiration int64
+
+ // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+ // IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+ SecretAccessKey *string
+
+ // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+ // IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
+ SessionToken *string
+
+ noSmithyDocumentSerde
+}
+
+// Provides information about the role that is assigned to the user.
+type RoleInfo struct {
+
+ // The identifier of the AWS account assigned to the user.
+ AccountId *string
+
+ // The friendly name of the role that is assigned to the user.
+ RoleName *string
+
+ noSmithyDocumentSerde
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
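
A common consumer pattern is converting `RoleCredentials` into the SDK's generic `aws.Credentials`. The sketch below treats `Expiration` as epoch milliseconds; that matches how the SSO credential provider interprets it, but it is an assumption not spelled out in the doc comment above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// toAwsCredentials adapts RoleCredentials to aws.Credentials.
// Assumption: Expiration is milliseconds since the Unix epoch.
func toAwsCredentials(rc types.RoleCredentials) aws.Credentials {
	return aws.Credentials{
		AccessKeyID:     aws.ToString(rc.AccessKeyId),
		SecretAccessKey: aws.ToString(rc.SecretAccessKey),
		SessionToken:    aws.ToString(rc.SessionToken),
		CanExpire:       true,
		Expires:         time.UnixMilli(rc.Expiration),
	}
}

func main() {
	rc := types.RoleCredentials{
		AccessKeyId:     aws.String("AKIA-example"), // placeholder
		SecretAccessKey: aws.String("secret"),       // placeholder
		SessionToken:    aws.String("token"),        // placeholder
		Expiration:      time.Now().Add(time.Hour).UnixMilli(),
	}
	fmt.Println(toAwsCredentials(rc).Expires)
}
```
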
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go
new file mode 100644
index 000000000..f6bf461f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go
@@ -0,0 +1,175 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpGetRoleCredentials struct {
+}
+
+func (*validateOpGetRoleCredentials) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetRoleCredentialsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetRoleCredentialsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListAccountRoles struct {
+}
+
+func (*validateOpListAccountRoles) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListAccountRolesInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListAccountRolesInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListAccounts struct {
+}
+
+func (*validateOpListAccounts) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListAccountsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListAccountsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpLogout struct {
+}
+
+func (*validateOpLogout) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*LogoutInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpLogoutInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After)
+}
+
+func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After)
+}
+
+func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After)
+}
+
+func addOpLogoutValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpLogout{}, middleware.After)
+}
+
+func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"}
+ if v.RoleName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleName"))
+ }
+ if v.AccountId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
+ }
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListAccountRolesInput(v *ListAccountRolesInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"}
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if v.AccountId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListAccountsInput(v *ListAccountsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"}
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpLogoutInput(v *LogoutInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"}
+ if v.AccessToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
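
Because these validators run in the Initialize step of the middleware stack, required-field violations are reported before any HTTP request is built or signed. A quick sketch of the observable behavior:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sso.NewFromConfig(cfg)

	// All three required fields are nil, so the validation middleware
	// rejects the call locally; no network round trip happens.
	_, err = client.GetRoleCredentials(context.TODO(), &sso.GetRoleCredentialsInput{})
	fmt.Println(err) // expected to name RoleName, AccountId, and AccessToken
}
```
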
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
new file mode 100644
index 000000000..ac8f4c476
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -0,0 +1,689 @@
+# v1.35.5 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2, which should passively reduce overall allocations, especially when the metrics system is not in use.
+
+# v1.35.4 (2025-10-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.3 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.2 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.1 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.0 (2025-09-23)
+
+* **Feature**: This release includes exception definition and documentation updates.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.5 (2025-09-22)
+
+* No change notes available for this release.
+
+# v1.34.4 (2025-09-10)
+
+* No change notes available for this release.
+
+# v1.34.3 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.2 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.1 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.0 (2025-08-26)
+
+* **Feature**: Remove incorrect endpoint tests
+
+# v1.33.2 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.1 (2025-08-20)
+
+* **Bug Fix**: Remove unused deserialization code.
+
+# v1.33.0 (2025-08-11)
+
+* **Feature**: Add support for configuring per-service Options via callback on global config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.0 (2025-08-04)
+
+* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.4 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.3 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.2 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.30.0 (2025-03-27)
+
+* **Feature**: This release adds AwsAdditionalDetails in the CreateTokenWithIAM API response.
+
+# v1.29.2 (2025-03-24)
+
+* No change notes available for this release.
+
+# v1.29.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.29.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.15 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.14 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.13 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.12 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2025-01-24)
+
+* **Documentation**: Fixed typos in the descriptions.
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.28.10 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.28.9 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.27.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.27.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.27.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.27.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.8 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.26.7 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.26.6 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2024-07-03)
+
+* No change notes available for this release.
+
+# v1.26.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.25.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.6 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.5 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.3 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.24.2 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2024-05-10)
+
+* **Feature**: Updated request parameters for PKCE support.
+
+# v1.23.5 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.23.4 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.22.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.22.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.7 (2024-01-16)
+
+* No change notes available for this release.
+
+# v1.21.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.21.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.21.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.20.1 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-11-17)
+
+* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`.
+
+# v1.19.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2023-09-22)
+
+* No change notes available for this release.
+
+# v1.17.0 (2023-09-20)
+
+* **Feature**: Update FIPS endpoints in aws-us-gov.
+
+# v1.16.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removal of non-unique regional endpoints, fixes for FIPS and DualStack endpoints, and making region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.15.6 (2023-09-05)
+
+* No change notes available for this release.
+
+# v1.15.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.15.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.14.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.14.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.14.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.14.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.14.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.13.11 (2022-12-19)
+
+* No change notes available for this release.
+
+# v1.13.10 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-09-30)
+
+* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference.
+
+# v1.13.5 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-08-25)
+
+* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action.
+
+# v1.12.14 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2022-08-08)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2022-07-11)
+
+* No change notes available for this release.
+
+# v1.12.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2022-05-27)
+
+* No change notes available for this release.
+
+# v1.12.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new `Adaptive` retry mode. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-07)
+
+* **Feature**: API client updated
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-11)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-09-17)
+
+* **Feature**: Updated API client and endpoints to latest revision.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Updated API model to latest revision.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
new file mode 100644
index 000000000..12ad2f5d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
@@ -0,0 +1,1019 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithydocument "github.com/aws/smithy-go/document"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net"
+ "net/http"
+ "sync/atomic"
+ "time"
+)
+
+const ServiceID = "SSO OIDC"
+const ServiceAPIVersion = "2019-06-10"
+
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
+}
+
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
+
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
+
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
+
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ }
+}
+
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
+
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
+}
+
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
+
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
+
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+}
+
+// Client provides the API client to make operations call for AWS SSO OIDC.
+type Client struct {
+ options Options
+
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ setResolvedDefaultsMode(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveEndpointResolverV2(&options)
+
+ resolveTracerProvider(&options)
+
+ resolveMeterProvider(&options)
+
+ resolveAuthSchemeResolver(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
+ }
+
+ initializeTimeOffsetResolver(client)
+
+ return client
+}
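+
+// Usage sketch (illustrative; not part of the generated file): constructing a
+// client directly from static Options. The region value is a placeholder.
+//
+//	client := ssooidc.New(ssooidc.Options{
+//		Region: "us-east-1",
+//	})
+//	_ = client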
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
+}
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeOperationRetryMaxAttempts(&options, *c)
+
+ finalizeClientEndpointResolverOptions(&options)
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
+ if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
+ return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %v", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %v", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+ if len(o.resolvedDefaultsMode) > 0 {
+ return
+ }
+
+ var mode aws.DefaultsMode
+ mode.SetFromString(string(o.DefaultsMode))
+
+ if mode == aws.DefaultsModeAuto {
+ mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+ }
+
+ o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ DefaultsMode: cfg.DefaultsMode,
+ RuntimeEnvironment: cfg.RuntimeEnvironment,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
+ AuthSchemePreference: cfg.AuthSchemePreference,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSRetryMaxAttempts(cfg, &opts)
+ resolveAWSRetryMode(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ resolveInterceptors(cfg, &opts)
+ resolveUseDualStackEndpoint(cfg, &opts)
+ resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
+ return New(opts, func(o *Options) {
+ for _, opt := range cfg.ServiceOptions {
+ opt(ServiceID, o)
+ }
+ for _, opt := range optFns {
+ opt(o)
+ }
+ })
+}
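+
+// Usage sketch (illustrative; not part of the generated file): the usual path
+// is to build the client from shared AWS configuration. Assumes the
+// github.com/aws/aws-sdk-go-v2/config module; the region is a placeholder.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := ssooidc.NewFromConfig(cfg)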
+
+func resolveHTTPClient(o *Options) {
+ var buildable *awshttp.BuildableClient
+
+ if o.HTTPClient != nil {
+ var ok bool
+ buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return
+ }
+ } else {
+ buildable = awshttp.NewBuildableClient()
+ }
+
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+ if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+ dialer.Timeout = dialerTimeout
+ }
+ })
+
+ buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+ if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+ transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+ }
+ })
+ }
+
+ o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+
+ if len(o.RetryMode) == 0 {
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ o.RetryMode = modeConfig.RetryMode
+ }
+ }
+ if len(o.RetryMode) == 0 {
+ o.RetryMode = aws.RetryModeStandard
+ }
+
+ var standardOptions []func(*retry.StandardOptions)
+ if v := o.RetryMaxAttempts; v != 0 {
+ standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+ so.MaxAttempts = v
+ })
+ }
+
+ switch o.RetryMode {
+ case aws.RetryModeAdaptive:
+ var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+ if len(standardOptions) != 0 {
+ adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+ ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+ })
+ }
+ o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+ default:
+ o.Retryer = retry.NewStandard(standardOptions...)
+ }
+}
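+
+// Usage sketch (illustrative; not part of the generated file): selecting the
+// adaptive retry mode and a custom attempt budget through functional options,
+// which the resolver above honors instead of its defaults.
+//
+//	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
+//		o.RetryMode = aws.RetryModeAdaptive
+//		o.RetryMaxAttempts = 5
+//	})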
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+ if len(cfg.RetryMode) == 0 {
+ return
+ }
+ o.RetryMode = cfg.RetryMode
+}
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+ if cfg.RetryMaxAttempts == 0 {
+ return
+ }
+ o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+ if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func resolveInterceptors(cfg aws.Config, o *Options) {
+ o.Interceptors = cfg.Interceptors.Copy()
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseDualStackEndpoint = value
+ }
+ return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseFIPSEndpoint = value
+ }
+ return nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
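+
+// Usage sketch (illustrative; not part of the generated file): both providers
+// default to no-ops, so telemetry is opt-in. Assuming the OpenTelemetry
+// adapters published in smithy-go (smithyoteltracing and smithyotelmetrics),
+// they could be wired in like this:
+//
+//	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
+//		o.TracerProvider = smithyoteltracing.Adapt(otel.GetTracerProvider())
+//		o.MeterProvider = smithyotelmetrics.Adapt(otel.GetMeterProvider())
+//	})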
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{
+ Interceptors: opts.Interceptors.BeforeRetryLoop,
+ }, "Retry", middleware.Before)
+}
+
+func addInterceptAttempt(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{
+ BeforeAttempt: opts.Interceptors.BeforeAttempt,
+ AfterAttempt: opts.Interceptors.AfterAttempt,
+ }, "Retry", middleware.After)
+}
+
+func addInterceptExecution(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&smithyhttp.InterceptExecution{
+ BeforeExecution: opts.Interceptors.BeforeExecution,
+ AfterExecution: opts.Interceptors.AfterExecution,
+ }, middleware.Before)
+}
+
+func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{
+ Interceptors: opts.Interceptors.BeforeSerialization,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{
+ Interceptors: opts.Interceptors.AfterSerialization,
+ }, "OperationSerializer", middleware.After)
+}
+
+func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{
+ Interceptors: opts.Interceptors.BeforeSigning,
+ }, "Signing", middleware.Before)
+}
+
+func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{
+ Interceptors: opts.Interceptors.AfterSigning,
+ }, "Signing", middleware.After)
+}
+
+func addInterceptTransmit(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{
+ BeforeTransmit: opts.Interceptors.BeforeTransmit,
+ AfterTransmit: opts.Interceptors.AfterTransmit,
+ }, middleware.After)
+}
+
+func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{
+ Interceptors: opts.Interceptors.BeforeDeserialization,
+ }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse)
+}
+
+func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{
+ Interceptors: opts.Interceptors.AfterDeserialization,
+ }, "OperationDeserializer", middleware.Before)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
new file mode 100644
index 000000000..681eb4087
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
@@ -0,0 +1,271 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates and returns access and refresh tokens for clients that are
+// authenticated using client secrets. The access token can be used to fetch
+// short-lived credentials for the assigned AWS accounts or to access application
+// APIs using bearer authentication.
+func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) {
+ if params == nil {
+ params = &CreateTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateTokenInput struct {
+
+ // The unique identifier string for the client or application. This value comes
+ // from the result of the RegisterClient API.
+ //
+ // This member is required.
+ ClientId *string
+
+ // A secret string generated for the client. This value should come from the
+ // persisted result of the RegisterClient API.
+ //
+ // This member is required.
+ ClientSecret *string
+
+ // Supports the following OAuth grant types: Authorization Code, Device Code, and
+ // Refresh Token. Specify one of the following values, depending on the grant type
+ // that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // This member is required.
+ GrantType *string
+
+ // Used only when calling this API for the Authorization Code grant type. The
+ // short-lived code is used to identify this authorization request.
+ Code *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ CodeVerifier *string
+
+ // Used only when calling this API for the Device Code grant type. This
+ // short-lived code is used to identify this authorization request. This comes
+ // from the result of the StartDeviceAuthorization API.
+ DeviceCode *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered to
+ // receive the authorization code.
+ RedirectUri *string
+
+ // Used only when calling this API for the Refresh Token grant type. This token is
+ // used to refresh short-lived tokens, such as the access token, that might expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ RefreshToken *string
+
+ // The list of scopes for which authorization is requested. The access token
+ // that is issued is limited to the scopes that are granted. If this value is
+ // not specified, IAM Identity Center authorizes all scopes configured for the
+ // client during the call to RegisterClient.
+ Scope []string
+
+ noSmithyDocumentSerde
+}
+
+type CreateTokenOutput struct {
+
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ AccessToken *string
+
+ // Indicates the time in seconds when an access token will expire.
+ ExpiresIn int32
+
+ // The idToken is not implemented or supported. For more information about the
+ // features and limitations of the current IAM Identity Center OIDC implementation,
+ // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference].
+ //
+ // A JSON Web Token (JWT) that identifies who is associated with the issued access
+ // token.
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ IdToken *string
+
+ // A token that, if present, can be used to refresh a previously issued access
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ RefreshToken *string
+
+ // Used to notify the client that the returned token is an access token. The
+ // supported token type is Bearer .
+ TokenType *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
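+
+// Usage sketch (illustrative; not part of the generated file): redeeming a
+// device code for tokens. All identifier values are placeholders that would
+// come from earlier RegisterClient and StartDeviceAuthorization calls.
+//
+//	out, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
+//		ClientId:     aws.String("exampleClientId"),
+//		ClientSecret: aws.String("exampleClientSecret"),
+//		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
+//		DeviceCode:   aws.String("exampleDeviceCode"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	accessToken := aws.ToString(out.AccessToken) // valid for out.ExpiresIn seconds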
+
+func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateTokenValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateToken",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
new file mode 100644
index 000000000..d7a27da59
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
@@ -0,0 +1,318 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates and returns access and refresh tokens for authorized client
+// applications that are authenticated using any IAM entity, such as a service role
+// or user. These tokens might contain defined scopes that specify permissions such
+// as read:profile or write:data . Through downscoping, you can use the scopes
+// parameter to request tokens with reduced permissions compared to the original
+// client application's permissions or, if applicable, the refresh token's scopes.
+// The access token can be used to fetch short-lived credentials for the assigned
+// Amazon Web Services accounts or to access application APIs using bearer
+// authentication.
+//
+// This API is used with Signature Version 4. For more information, see [Amazon Web Services Signature Version 4 for API Requests].
+//
+// [Amazon Web Services Signature Version 4 for API Requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv.html
+func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) {
+ if params == nil {
+ params = &CreateTokenWithIAMInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateTokenWithIAMOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateTokenWithIAMInput struct {
+
+ // The unique identifier string for the client or application. This value is an
+ // application ARN that has OAuth grants configured.
+ //
+ // This member is required.
+ ClientId *string
+
+ // Supports the following OAuth grant types: Authorization Code, Refresh Token,
+ // JWT Bearer, and Token Exchange. Specify one of the following values, depending
+ // on the grant type that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
+ //
+ // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
+ //
+ // This member is required.
+ GrantType *string
+
+ // Used only when calling this API for the JWT Bearer grant type. This value
+ // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To
+ // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the
+ // application.
+ Assertion *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // short-lived code is used to identify this authorization request. The code is
+ // obtained through a redirect from IAM Identity Center to a redirect URI persisted
+ // in the Authorization Code GrantOptions for the application.
+ Code *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ CodeVerifier *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered to
+ // receive the authorization code.
+ RedirectUri *string
+
+ // Used only when calling this API for the Refresh Token grant type. This token is
+ // used to refresh short-lived tokens, such as the access token, that might expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ RefreshToken *string
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that the requester can receive. The following values
+ // are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ RequestedTokenType *string
+
+ // The list of scopes for which authorization is requested. The access token that
+ // is issued is limited to the scopes that are granted. If the value is not
+ // specified, IAM Identity Center authorizes all scopes configured for the
+ // application, including the following default scopes: openid , aws ,
+ // sts:identity_context .
+ Scope []string
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the subject of the exchange. The value of the subject token must be an
+ // access token issued by IAM Identity Center to a different client or application.
+ // The access token must have authorized scopes that indicate the requested
+ // application as a target audience.
+ SubjectToken *string
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that is passed as the subject of the exchange. The
+ // following value is supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ SubjectTokenType *string
+
+ noSmithyDocumentSerde
+}
+
+type CreateTokenWithIAMOutput struct {
+
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ AccessToken *string
+
+ // A structure containing information from IAM Identity Center managed user and
+ // group information.
+ AwsAdditionalDetails *types.AwsAdditionalDetails
+
+ // Indicates the time in seconds when an access token will expire.
+ ExpiresIn int32
+
+ // A JSON Web Token (JWT) that identifies the user associated with the issued
+ // access token.
+ IdToken *string
+
+ // Indicates the type of tokens that are issued by IAM Identity Center. The
+ // following values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ IssuedTokenType *string
+
+ // A token that, if present, can be used to refresh a previously issued access
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ RefreshToken *string
+
+ // The list of scopes for which authorization is granted. The access token that is
+ // issued is limited to the scopes that are granted.
+ Scope []string
+
+ // Used to notify the requester that the returned token is an access token. The
+ // supported token type is Bearer .
+ TokenType *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
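+
+// Usage sketch (illustrative; not part of the generated file): a token
+// exchange, trading an access token issued to one application for one scoped
+// to another. The application ARN and subject token are placeholders, and the
+// request must be made with SigV4-signed credentials.
+//
+//	out, err := client.CreateTokenWithIAM(ctx, &ssooidc.CreateTokenWithIAMInput{
+//		ClientId:           aws.String("arn:aws:sso::123456789012:application/exampleApp"),
+//		GrantType:          aws.String("urn:ietf:params:oauth:grant-type:token-exchange"),
+//		SubjectToken:       aws.String("exampleSubjectAccessToken"),
+//		SubjectTokenType:   aws.String("urn:ietf:params:oauth:token-type:access_token"),
+//		RequestedTokenType: aws.String("urn:ietf:params:oauth:token-type:access_token"),
+//	})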
+
+func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateTokenWithIAM",
+ }
+}
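
The addOperation* functions above assemble the per-operation middleware stack; callers never invoke them directly. For orientation, a minimal sketch, assuming the standard aws-sdk-go-v2 extension point `Options.APIOptions`, of how a caller hooks into that same stack; the middleware name and logging are illustrative, not part of this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		// APIOptions functions run against the same *middleware.Stack that
		// addOperationCreateTokenWithIAMMiddlewares populates.
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
				"logOperation",
				func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
					middleware.InitializeOutput, middleware.Metadata, error,
				) {
					// Log each operation's input type as it enters the stack.
					log.Printf("invoking %T", in.Parameters)
					return next.HandleInitialize(ctx, in)
				},
			), middleware.Before)
		})
	})
	_ = client
}
```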
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
new file mode 100644
index 000000000..8d50092fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
@@ -0,0 +1,242 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Registers a public client with IAM Identity Center. This allows clients to
+// perform authorization using the authorization code grant with Proof Key for Code
+// Exchange (PKCE) or the device code grant.
+func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) {
+ if params == nil {
+ params = &RegisterClientInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*RegisterClientOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type RegisterClientInput struct {
+
+ // The friendly name of the client.
+ //
+ // This member is required.
+ ClientName *string
+
+ // The type of client. The service supports only public as a client type. Anything
+ // other than public will be rejected by the service.
+ //
+ // This member is required.
+ ClientType *string
+
+ // This IAM Identity Center application ARN is used to define
+ // administrator-managed configuration for public client access to resources. At
+	// authorization, the scopes, grants, and redirect URIs available to this client
+ // will be restricted by this application resource.
+ EntitledApplicationArn *string
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list is
+ // used to restrict the token granting flows available to the client. Supports the
+ // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh
+ // Token.
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
+ //
+ // * Refresh Token - refresh_token
+ GrantTypes []string
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string
+
+	// The list of redirect URIs that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent can
+ // be redirected back to.
+ RedirectUris []string
+
+ // The list of scopes that are defined by the client. Upon authorization, this
+ // list is used to restrict permissions when granting an access token.
+ Scopes []string
+
+ noSmithyDocumentSerde
+}
+
+type RegisterClientOutput struct {
+
+ // An endpoint that the client can use to request authorization.
+ AuthorizationEndpoint *string
+
+	// The unique identifier string for each client. The client uses this identifier
+	// to get authenticated by the service in subsequent calls.
+ ClientId *string
+
+ // Indicates the time at which the clientId and clientSecret were issued.
+ ClientIdIssuedAt int64
+
+ // A secret string generated for the client. The client will use this string to
+ // get authenticated by the service in subsequent calls.
+ ClientSecret *string
+
+ // Indicates the time at which the clientId and clientSecret will become invalid.
+ ClientSecretExpiresAt int64
+
+ // An endpoint that the client can use to create tokens.
+ TokenEndpoint *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpRegisterClientValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "RegisterClient",
+ }
+}
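
The StartDeviceAuthorization docs below assume the persisted result of this operation. A minimal sketch of caching a registration until it expires, assuming `ClientSecretExpiresAt` is epoch seconds; the cache type and helper name are illustrative, not part of the SDK:

```go
package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

// clientRegistration holds the persisted RegisterClient result.
type clientRegistration struct {
	ClientId     string
	ClientSecret string
	ExpiresAt    time.Time
}

func registerIfNeeded(ctx context.Context, c *ssooidc.Client, cached *clientRegistration) (*clientRegistration, error) {
	if cached != nil && time.Now().Before(cached.ExpiresAt) {
		return cached, nil // registration still valid; reuse it
	}
	out, err := c.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"), // illustrative name
		ClientType: aws.String("public"),      // only "public" is accepted
	})
	if err != nil {
		return nil, err
	}
	return &clientRegistration{
		ClientId:     aws.ToString(out.ClientId),
		ClientSecret: aws.ToString(out.ClientSecret),
		ExpiresAt:    time.Unix(out.ClientSecretExpiresAt, 0),
	}, nil
}
```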
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
new file mode 100644
index 000000000..7242ac82b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
@@ -0,0 +1,224 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Initiates device authorization by requesting a pair of verification codes from
+// the authorization service.
+func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) {
+ if params == nil {
+ params = &StartDeviceAuthorizationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*StartDeviceAuthorizationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type StartDeviceAuthorizationInput struct {
+
+ // The unique identifier string for the client that is registered with IAM
+	// Identity Center. This value should come from the persisted result of the
+	// RegisterClient API operation.
+ //
+ // This member is required.
+ ClientId *string
+
+ // A secret string that is generated for the client. This value should come from
+	// the persisted result of the RegisterClient API operation.
+ //
+ // This member is required.
+ ClientSecret *string
+
+ // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal]
+ // in the IAM Identity Center User Guide.
+ //
+ // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html
+ //
+ // This member is required.
+ StartUrl *string
+
+ noSmithyDocumentSerde
+}
+
+type StartDeviceAuthorizationOutput struct {
+
+ // The short-lived code that is used by the device when polling for a session
+ // token.
+ DeviceCode *string
+
+ // Indicates the number of seconds in which the verification code will become
+ // invalid.
+ ExpiresIn int32
+
+ // Indicates the number of seconds the client must wait between attempts when
+ // polling for a session.
+ Interval int32
+
+ // A one-time user verification code. This is needed to authorize an in-use device.
+ UserCode *string
+
+ // The URI of the verification page that takes the userCode to authorize the
+ // device.
+ VerificationUri *string
+
+ // An alternate URL that the client can use to automatically launch a browser.
+ // This process skips the manual step in which the user visits the verification
+ // page and enters their code.
+ VerificationUriComplete *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "StartDeviceAuthorization",
+ }
+}
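
Taken together with RegisterClient and CreateToken, this operation drives the device grant. A hedged sketch of the full round trip, assuming the standard device-code grant-type URN from RFC 8628 and simplified back-off handling:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func deviceFlow(ctx context.Context, c *ssooidc.Client, clientID, clientSecret, startURL string) (string, error) {
	auth, err := c.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     aws.String(clientID),
		ClientSecret: aws.String(clientSecret),
		StartUrl:     aws.String(startURL),
	})
	if err != nil {
		return "", err
	}
	// The user completes verification out of band.
	fmt.Printf("visit %s and enter code %s\n",
		aws.ToString(auth.VerificationUri), aws.ToString(auth.UserCode))

	interval := time.Duration(auth.Interval) * time.Second
	for {
		tok, err := c.CreateToken(ctx, &ssooidc.CreateTokenInput{
			ClientId:     aws.String(clientID),
			ClientSecret: aws.String(clientSecret),
			GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
			DeviceCode:   auth.DeviceCode,
		})
		if err == nil {
			return aws.ToString(tok.AccessToken), nil
		}
		var pending *types.AuthorizationPendingException
		var slow *types.SlowDownException
		switch {
		case errors.As(err, &pending):
			time.Sleep(interval) // user has not approved yet; keep polling
		case errors.As(err, &slow):
			interval += 5 * time.Second // back off when told to slow down
			time.Sleep(interval)
		default:
			return "", err
		}
	}
}
```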
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
new file mode 100644
index 000000000..89b01c629
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
@@ -0,0 +1,357 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "slices"
+ "strings"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "sso-oauth")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ sorted := sortAuthOptions(options, m.options.AuthSchemePreference)
+ for _, option := range sorted {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option {
+ byPriority := make([]*smithyauth.Option, 0, len(options))
+ for _, prefName := range preferred {
+ for _, option := range options {
+ optName := option.SchemeID
+ if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 {
+ optName = parts[1]
+ }
+ if prefName == optName {
+ byPriority = append(byPriority, option)
+ }
+ }
+ }
+ for _, option := range options {
+ if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool {
+ return o.SchemeID == option.SchemeID
+ }) {
+ byPriority = append(byPriority, option)
+ }
+ }
+ return byPriority
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
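
The resolver above defaults the three OAuth operations to anonymous auth and everything else to SigV4 against the sso-oauth signing name. A minimal sketch, assuming you genuinely need to override resolution, of plugging a custom AuthSchemeResolver into the Options field this file consults; forcing anonymous auth for every operation is illustrative, not a recommendation:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	smithyauth "github.com/aws/smithy-go/auth"
)

// anonymousOnly satisfies ssooidc.AuthSchemeResolver.
type anonymousOnly struct{}

func (anonymousOnly) ResolveAuthSchemes(ctx context.Context, params *ssooidc.AuthResolverParameters) ([]*smithyauth.Option, error) {
	// Every operation gets the anonymous scheme, mirroring what
	// operationAuthOptions already does for the three OAuth calls.
	return []*smithyauth.Option{{SchemeID: smithyauth.SchemeIDAnonymous}}, nil
}

func newClient(ctx context.Context) (*ssooidc.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		o.AuthSchemeResolver = anonymousOnly{}
	}), nil
}
```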
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
new file mode 100644
index 000000000..fb9a0df51
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
@@ -0,0 +1,2244 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
+ smithy "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strings"
+)
+
+type awsRestjson1_deserializeOpCreateToken struct {
+}
+
+func (*awsRestjson1_deserializeOpCreateToken) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata)
+ }
+ output := &CreateTokenOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("AccessDeniedException", errorCode):
+ return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)
+
+ case strings.EqualFold("AuthorizationPendingException", errorCode):
+ return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody)
+
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("InternalServerException", errorCode):
+ return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+ case strings.EqualFold("InvalidClientException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+ case strings.EqualFold("InvalidGrantException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("InvalidScopeException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+ case strings.EqualFold("SlowDownException", errorCode):
+ return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedClientException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+ case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+ return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
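
On the caller side, each code in the switch above surfaces as a concrete type from the types package, and anything unrecognized as a *smithy.GenericAPIError carrying the sanitized code. A short sketch, assuming CreateToken as the call site, of unwrapping these with errors.As:

```go
package main

import (
	"context"
	"errors"
	"log"

	smithy "github.com/aws/smithy-go"

	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

func classify(err error) {
	// Typed errors match the concrete exception types mapped above.
	var invalidGrant *types.InvalidGrantException
	if errors.As(err, &invalidGrant) {
		log.Println("grant rejected; restart the authorization flow")
		return
	}
	// Everything else still satisfies smithy.APIError, including the
	// GenericAPIError produced by the default branch of the switch.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		log.Printf("service error %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
}

func example(ctx context.Context, c *ssooidc.Client, in *ssooidc.CreateTokenInput) {
	if _, err := c.CreateToken(ctx, in); err != nil {
		classify(err)
	}
}
```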
+
+func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *CreateTokenOutput
+ if *v == nil {
+ sv = &CreateTokenOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accessToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value)
+ }
+ sv.AccessToken = ptr.String(jtv)
+ }
+
+ case "expiresIn":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ExpiresIn = int32(i64)
+ }
+
+ case "idToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IdToken to be of type string, got %T instead", value)
+ }
+ sv.IdToken = ptr.String(jtv)
+ }
+
+ case "refreshToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value)
+ }
+ sv.RefreshToken = ptr.String(jtv)
+ }
+
+ case "tokenType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TokenType to be of type string, got %T instead", value)
+ }
+ sv.TokenType = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpCreateTokenWithIAM struct {
+}
+
+func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata)
+ }
+ output := &CreateTokenWithIAMOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("AccessDeniedException", errorCode):
+ return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)
+
+ case strings.EqualFold("AuthorizationPendingException", errorCode):
+ return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody)
+
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("InternalServerException", errorCode):
+ return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+ case strings.EqualFold("InvalidClientException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+ case strings.EqualFold("InvalidGrantException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestRegionException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody)
+
+ case strings.EqualFold("InvalidScopeException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+ case strings.EqualFold("SlowDownException", errorCode):
+ return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedClientException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+ case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+ return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *CreateTokenWithIAMOutput
+ if *v == nil {
+ sv = &CreateTokenWithIAMOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accessToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value)
+ }
+ sv.AccessToken = ptr.String(jtv)
+ }
+
+ case "awsAdditionalDetails":
+ if err := awsRestjson1_deserializeDocumentAwsAdditionalDetails(&sv.AwsAdditionalDetails, value); err != nil {
+ return err
+ }
+
+ case "expiresIn":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ExpiresIn = int32(i64)
+ }
+
+ case "idToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IdToken to be of type string, got %T instead", value)
+ }
+ sv.IdToken = ptr.String(jtv)
+ }
+
+ case "issuedTokenType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value)
+ }
+ sv.IssuedTokenType = ptr.String(jtv)
+ }
+
+ case "refreshToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value)
+ }
+ sv.RefreshToken = ptr.String(jtv)
+ }
+
+ case "scope":
+ if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil {
+ return err
+ }
+
+ case "tokenType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TokenType to be of type string, got %T instead", value)
+ }
+ sv.TokenType = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpRegisterClient struct {
+}
+
+func (*awsRestjson1_deserializeOpRegisterClient) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata)
+ }
+ output := &RegisterClientOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InternalServerException", errorCode):
+ return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+ case strings.EqualFold("InvalidClientMetadataException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody)
+
+ case strings.EqualFold("InvalidRedirectUriException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("InvalidScopeException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+ case strings.EqualFold("SlowDownException", errorCode):
+ return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+ case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+ return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *RegisterClientOutput
+ if *v == nil {
+ sv = &RegisterClientOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "authorizationEndpoint":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+ }
+ sv.AuthorizationEndpoint = ptr.String(jtv)
+ }
+
+ case "clientId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ClientId to be of type string, got %T instead", value)
+ }
+ sv.ClientId = ptr.String(jtv)
+ }
+
+ case "clientIdIssuedAt":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ClientIdIssuedAt = i64
+ }
+
+ case "clientSecret":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value)
+ }
+ sv.ClientSecret = ptr.String(jtv)
+ }
+
+ case "clientSecretExpiresAt":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ClientSecretExpiresAt = i64
+ }
+
+ case "tokenEndpoint":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+ }
+ sv.TokenEndpoint = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type awsRestjson1_deserializeOpStartDeviceAuthorization struct {
+}
+
+func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata)
+ }
+ output := &StartDeviceAuthorizationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("InternalServerException", errorCode):
+ return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+ case strings.EqualFold("InvalidClientException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("SlowDownException", errorCode):
+ return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedClientException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *StartDeviceAuthorizationOutput
+ if *v == nil {
+ sv = &StartDeviceAuthorizationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "deviceCode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value)
+ }
+ sv.DeviceCode = ptr.String(jtv)
+ }
+
+ case "expiresIn":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ExpiresIn = int32(i64)
+ }
+
+ case "interval":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.Interval = int32(i64)
+ }
+
+ case "userCode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected UserCode to be of type string, got %T instead", value)
+ }
+ sv.UserCode = ptr.String(jtv)
+ }
+
+ case "verificationUri":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+ }
+ sv.VerificationUri = ptr.String(jtv)
+ }
+
+ case "verificationUriComplete":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected URI to be of type string, got %T instead", value)
+ }
+ sv.VerificationUriComplete = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.AccessDeniedException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.AuthorizationPendingException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ExpiredTokenException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InternalServerException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidClientException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidClientMetadataException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidGrantException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRedirectUriException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRequestException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRequestRegionException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidScopeException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.SlowDownException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.UnauthorizedClientException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.UnsupportedGrantTypeException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
+func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AccessDeniedException
+ if *v == nil {
+ sv = &types.AccessDeniedException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ case "reason":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccessDeniedExceptionReason to be of type string, got %T instead", value)
+ }
+ sv.Reason = types.AccessDeniedExceptionReason(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
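+
+// Note (editorial): the Error_ and Error_description field names above come
+// from the smithy-go generator's collision-avoidance renaming: the natural
+// names would clash with the Error() method these exception types define to
+// satisfy Go's error interface, so a trailing underscore is appended.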
+
+func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AuthorizationPendingException
+ if *v == nil {
+ sv = &types.AuthorizationPendingException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentAwsAdditionalDetails(v **types.AwsAdditionalDetails, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AwsAdditionalDetails
+ if *v == nil {
+ sv = &types.AwsAdditionalDetails{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "identityContext":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IdentityContext to be of type string, got %T instead", value)
+ }
+ sv.IdentityContext = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ExpiredTokenException
+ if *v == nil {
+ sv = &types.ExpiredTokenException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InternalServerException
+ if *v == nil {
+ sv = &types.InternalServerException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidClientException
+ if *v == nil {
+ sv = &types.InvalidClientException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidClientMetadataException
+ if *v == nil {
+ sv = &types.InvalidClientMetadataException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidGrantException
+ if *v == nil {
+ sv = &types.InvalidGrantException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRedirectUriException
+ if *v == nil {
+ sv = &types.InvalidRedirectUriException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRequestException
+ if *v == nil {
+ sv = &types.InvalidRequestException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ case "reason":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected InvalidRequestExceptionReason to be of type string, got %T instead", value)
+ }
+ sv.Reason = types.InvalidRequestExceptionReason(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRequestRegionException
+ if *v == nil {
+ sv = &types.InvalidRequestRegionException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "endpoint":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Location to be of type string, got %T instead", value)
+ }
+ sv.Endpoint = ptr.String(jtv)
+ }
+
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ case "region":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Region to be of type string, got %T instead", value)
+ }
+ sv.Region = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidScopeException
+ if *v == nil {
+ sv = &types.InvalidScopeException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Scope to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
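+
+// Note (editorial): list shapes such as Scopes use the slice variant of the
+// same pattern: assert the value to []interface{}, convert each element, and
+// append into the caller-supplied slice. For example (illustrative):
+//
+//	var scopes []string
+//	if err := awsRestjson1_deserializeDocumentScopes(&scopes, shapeValue); err != nil {
+//		return err
+//	}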
+
+func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.SlowDownException
+ if *v == nil {
+ sv = &types.SlowDownException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.UnauthorizedClientException
+ if *v == nil {
+ sv = &types.UnauthorizedClientException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.UnsupportedGrantTypeException
+ if *v == nil {
+ sv = &types.UnsupportedGrantTypeException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
new file mode 100644
index 000000000..aa9cf731d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
@@ -0,0 +1,49 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package ssooidc provides the API client, operations, and parameter types for
+// AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as the CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access token
+// upon successful authentication and authorization with IAM Identity Center.
+//
+// # API namespaces
+//
+// IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity
+// Center OpenID Connect uses the sso-oauth namespace.
+//
+// # Considerations for using this guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+// - The IAM Identity Center OIDC service currently implements only the portions
+// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628]) that are necessary to
+// enable single sign-on authentication with the CLI.
+//
+// - With older versions of the CLI, the service only emits OIDC access tokens,
+// so to obtain a new token, users must explicitly re-authenticate. To access the
+// OIDC flow that supports token refresh and doesn’t require re-authentication,
+// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with
+// support for OIDC token refresh and configurable IAM Identity Center session
+// durations. For more information, see [Configure Amazon Web Services access portal session duration].
+//
+// - The access tokens provided by this service grant access to all Amazon Web
+// Services account entitlements assigned to an IAM Identity Center user, not just
+// a particular application.
+//
+// - The documentation in this guide does not describe the mechanism to convert
+// the access token into Amazon Web Services Auth (“sigv4”) credentials for use
+// with IAM-protected Amazon Web Services service endpoints. For more information,
+// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity
+// Center User Guide.
+//
+// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html
+// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html
+// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628
+// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
+package ssooidc
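+
+// A minimal device-authorization sketch (editorial, illustrative only: it
+// assumes shared-config loading via the config package, a prior RegisterClient
+// result in reg, and elides error handling):
+//
+//	cfg, _ := config.LoadDefaultConfig(context.TODO())
+//	client := ssooidc.NewFromConfig(cfg)
+//	out, _ := client.StartDeviceAuthorization(context.TODO(), &ssooidc.StartDeviceAuthorizationInput{
+//		ClientId:     reg.ClientId,
+//		ClientSecret: reg.ClientSecret,
+//		StartUrl:     aws.String("https://my-sso-portal.awsapps.com/start"),
+//	})
+//	fmt.Println("visit:", aws.ToString(out.VerificationUriComplete))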
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
new file mode 100644
index 000000000..1e001f7a9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
@@ -0,0 +1,558 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
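+
+// For example (illustrative; this hook is the legacy v1 resolver mechanism,
+// superseded by BaseEndpoint and EndpointResolverV2):
+//
+//	client := ssooidc.New(ssooidc.Options{
+//		Region:           "us-east-1",
+//		EndpointResolver: ssooidc.EndpointResolverFromURL("https://oidc.custom.example.com"),
+//	})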
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ eo := m.Options
+ eo.Logger = middleware.GetLogger(ctx)
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+ if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "sso-oauth"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+ var resolver aws.EndpointResolverWithOptions
+
+ if awsResolverWithOptions != nil {
+ resolver = awsResolverWithOptions
+ } else if awsResolver != nil {
+ resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+ }
+
+ return &wrappedEndpointResolver{
+ awsResolver: resolver,
+ }
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+ options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+ if len(options.EndpointOptions.ResolvedRegion) == 0 {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(options.Region, fipsInfix) ||
+ strings.Contains(options.Region, fipsPrefix) ||
+ strings.Contains(options.Region, fipsSuffix) {
+ options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+ options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+ options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ }
+ }
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
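+
+// Note (editorial): the env probe above encodes endpoint-override precedence:
+// when only the global AWS_ENDPOINT_URL is set, the function returns early so
+// the value the config loader already folded into cfg.BaseEndpoint stands;
+// otherwise the service-specific "SSO OIDC" entry is resolved from the config
+// sources and takes precedence.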
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _ = _UseDualStack
+ _UseFIPS := *params.UseFIPS
+ _ = _UseFIPS
+
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.Name == "aws-us-gov" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
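+
+// Note (editorial): the rules above are evaluated in a fixed order: an
+// explicit SDK::Endpoint always wins (and rejects FIPS or dual-stack
+// overrides), then FIPS+dual-stack, FIPS-only (with a GovCloud special case
+// that keeps the plain oidc. hostname), dual-stack-only, and finally the
+// partition default, yielding hostnames such as oidc-fips.us-east-1.api.aws,
+// oidc-fips.us-east-1.amazonaws.com, oidc.us-east-1.api.aws, and
+// oidc.us-east-1.amazonaws.com respectively.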
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
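+
+// Note (editorial): this finalize-step middleware is where the resolved
+// endpoint is spliced into the outgoing request: scheme and host are replaced
+// wholesale, paths are joined so a base-endpoint path prefix is preserved, and
+// any auth options carried on the endpoint Properties are merged into the
+// already-resolved auth scheme's signer properties.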
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
new file mode 100644
index 000000000..ee79b48ea
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
@@ -0,0 +1,37 @@
+{
+ "dependencies": {
+ "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+ "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+ "github.com/aws/smithy-go": "v1.4.0"
+ },
+ "files": [
+ "api_client.go",
+ "api_client_test.go",
+ "api_op_CreateToken.go",
+ "api_op_CreateTokenWithIAM.go",
+ "api_op_RegisterClient.go",
+ "api_op_StartDeviceAuthorization.go",
+ "auth.go",
+ "deserializers.go",
+ "doc.go",
+ "endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
+ "generated.json",
+ "internal/endpoints/endpoints.go",
+ "internal/endpoints/endpoints_test.go",
+ "options.go",
+ "protocol_test.go",
+ "serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
+ "types/enums.go",
+ "types/errors.go",
+ "types/types.go",
+ "validators.go"
+ ],
+ "go": "1.23",
+ "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc",
+ "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
new file mode 100644
index 000000000..d882f41dd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package ssooidc
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.35.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..f15c1a3ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
@@ -0,0 +1,603 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+ "github.com/aws/smithy-go/logging"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the
+ // provided logger.
+ LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+ // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+ // name. You must not set this value directly in your application.
+ ResolvedRegion string
+
+ // DisableHTTPS informs the resolver to return an endpoint that does not use the
+ // HTTPS scheme.
+ DisableHTTPS bool
+
+ // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+ return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+ return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+ return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+ return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+ return endpoints.Options{
+ Logger: options.Logger,
+ LogDeprecated: options.LogDeprecated,
+ ResolvedRegion: options.ResolvedRegion,
+ DisableHTTPS: options.DisableHTTPS,
+ UseDualStackEndpoint: options.UseDualStackEndpoint,
+ UseFIPSEndpoint: options.UseFIPSEndpoint,
+ }
+}
+
+// Resolver SSO OIDC endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := transformToSharedOptions(options)
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
+
+var partitionRegexp = struct {
+ Aws *regexp.Regexp
+ AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
+ AwsIso *regexp.Regexp
+ AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
+ AwsUsGov *regexp.Regexp
+}{
+
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
+ AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
+ AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+ AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "oidc.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.Aws,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.af-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-northeast-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-northeast-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-northeast-3",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-northeast-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-northeast-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-4.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-5.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-5",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ca-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-central-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-north-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-north-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "eu-west-3",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-west-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.il-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.me-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.me-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "sa-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.sa-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "oidc.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsCn,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.cn-north-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIso,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoB,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "oidc.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsUsGov,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-gov-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+}
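+
+// Variant-selection sketch (illustrative only; fipsExample is not part of the
+// generated code): enabling the FIPS variant swaps the hostname template from
+// oidc.{region}.amazonaws.com to oidc-fips.{region}.amazonaws.com within the
+// same partition; the dual-stack variant picks the api.aws templates instead.
+func fipsExample() (aws.Endpoint, error) {
+	r := New()
+	return r.ResolveEndpoint("us-east-1", Options{
+		UseFIPSEndpoint: aws.FIPSEndpointStateEnabled,
+	})
+}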
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
new file mode 100644
index 000000000..f35f3d5a3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
@@ -0,0 +1,239 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on an operation
+	// call to modify this list for per-operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+	// client's initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+	// Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will
+	// make when calling an operation that fails with a retryable error. A value of 0
+	// is ignored, and will not be used to configure the API client's default retryer
+	// or modify a per-operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// the Retryer option is not also specified.
+	//
+	// When creating a new API client, this member will only be used if the Retryer
+	// Options member is nil. This value will be ignored if Retryer is not nil.
+	//
+	// Currently does not support per-operation call overrides; it may in the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto, this will store the resolved
+	// value at that point in time.
+	//
+	// Currently does not support per-operation call overrides; it may in the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // Client registry of operation interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ to.Interceptors = o.Interceptors.Copy()
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
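+// Functional-option sketch (illustrative only; applyOptionsExample and the
+// endpoint value are not part of the generated code): option helpers mutate
+// *Options, so they can be applied in sequence over a base value.
+func applyOptionsExample() Options {
+	o := Options{Region: "us-east-1"}
+	endpoint := "https://oidc.example.internal" // hypothetical override
+	for _, fn := range []func(*Options){
+		WithAPIOptions(), // no extra middleware in this sketch
+		func(opts *Options) { opts.BaseEndpoint = &endpoint },
+	} {
+		fn(&o)
+	}
+	return o
+}
+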
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
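+// Signing-override sketch (illustrative only; signingOverrideExample and the
+// literal values are not part of the generated code): both helpers append an
+// Initialize-step middleware, so the override applies to every operation
+// invoked with these options and takes precedence over resolved values.
+func signingOverrideExample(o *Options) {
+	WithSigV4SigningName("sso-oauth")(o)   // hypothetical signing name
+	WithSigV4SigningRegion("us-west-2")(o) // hypothetical signing region
+}
+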
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
new file mode 100644
index 000000000..1ad103d1e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
@@ -0,0 +1,512 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ smithyjson "github.com/aws/smithy-go/encoding/json"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+type awsRestjson1_serializeOpCreateToken struct {
+}
+
+func (*awsRestjson1_serializeOpCreateToken) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateTokenInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/token")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ restEncoder.SetHeader("Content-Type").String("application/json")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ return nil
+}
+
+func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientId != nil {
+ ok := object.Key("clientId")
+ ok.String(*v.ClientId)
+ }
+
+ if v.ClientSecret != nil {
+ ok := object.Key("clientSecret")
+ ok.String(*v.ClientSecret)
+ }
+
+ if v.Code != nil {
+ ok := object.Key("code")
+ ok.String(*v.Code)
+ }
+
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
+ if v.DeviceCode != nil {
+ ok := object.Key("deviceCode")
+ ok.String(*v.DeviceCode)
+ }
+
+ if v.GrantType != nil {
+ ok := object.Key("grantType")
+ ok.String(*v.GrantType)
+ }
+
+ if v.RedirectUri != nil {
+ ok := object.Key("redirectUri")
+ ok.String(*v.RedirectUri)
+ }
+
+ if v.RefreshToken != nil {
+ ok := object.Key("refreshToken")
+ ok.String(*v.RefreshToken)
+ }
+
+ if v.Scope != nil {
+ ok := object.Key("scope")
+ if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
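+// Wire-format sketch (illustrative, not generated): for a device-code grant
+// the document serializer above produces a POST /token body along the lines
+// of
+//
+//	{
+//	  "clientId": "...",
+//	  "clientSecret": "...",
+//	  "grantType": "urn:ietf:params:oauth:grant-type:device_code",
+//	  "deviceCode": "..."
+//	}
+//
+// Only non-nil input fields are written, so optional members such as scope
+// are omitted from the body rather than serialized as null.
+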
+type awsRestjson1_serializeOpCreateTokenWithIAM struct {
+}
+
+func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateTokenWithIAMInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ restEncoder.SetHeader("Content-Type").String("application/json")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ return nil
+}
+
+func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Assertion != nil {
+ ok := object.Key("assertion")
+ ok.String(*v.Assertion)
+ }
+
+ if v.ClientId != nil {
+ ok := object.Key("clientId")
+ ok.String(*v.ClientId)
+ }
+
+ if v.Code != nil {
+ ok := object.Key("code")
+ ok.String(*v.Code)
+ }
+
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
+ if v.GrantType != nil {
+ ok := object.Key("grantType")
+ ok.String(*v.GrantType)
+ }
+
+ if v.RedirectUri != nil {
+ ok := object.Key("redirectUri")
+ ok.String(*v.RedirectUri)
+ }
+
+ if v.RefreshToken != nil {
+ ok := object.Key("refreshToken")
+ ok.String(*v.RefreshToken)
+ }
+
+ if v.RequestedTokenType != nil {
+ ok := object.Key("requestedTokenType")
+ ok.String(*v.RequestedTokenType)
+ }
+
+ if v.Scope != nil {
+ ok := object.Key("scope")
+ if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.SubjectToken != nil {
+ ok := object.Key("subjectToken")
+ ok.String(*v.SubjectToken)
+ }
+
+ if v.SubjectTokenType != nil {
+ ok := object.Key("subjectTokenType")
+ ok.String(*v.SubjectTokenType)
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpRegisterClient struct {
+}
+
+func (*awsRestjson1_serializeOpRegisterClient) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*RegisterClientInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/client/register")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ restEncoder.SetHeader("Content-Type").String("application/json")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ return nil
+}
+
+func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientName != nil {
+ ok := object.Key("clientName")
+ ok.String(*v.ClientName)
+ }
+
+ if v.ClientType != nil {
+ ok := object.Key("clientType")
+ ok.String(*v.ClientType)
+ }
+
+ if v.EntitledApplicationArn != nil {
+ ok := object.Key("entitledApplicationArn")
+ ok.String(*v.EntitledApplicationArn)
+ }
+
+ if v.GrantTypes != nil {
+ ok := object.Key("grantTypes")
+ if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.IssuerUrl != nil {
+ ok := object.Key("issuerUrl")
+ ok.String(*v.IssuerUrl)
+ }
+
+ if v.RedirectUris != nil {
+ ok := object.Key("redirectUris")
+ if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Scopes != nil {
+ ok := object.Key("scopes")
+ if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type awsRestjson1_serializeOpStartDeviceAuthorization struct {
+}
+
+func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*StartDeviceAuthorizationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/device_authorization")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ restEncoder.SetHeader("Content-Type").String("application/json")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ return nil
+}
+
+func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientId != nil {
+ ok := object.Key("clientId")
+ ok.String(*v.ClientId)
+ }
+
+ if v.ClientSecret != nil {
+ ok := object.Key("clientSecret")
+ ok.String(*v.ClientSecret)
+ }
+
+ if v.StartUrl != nil {
+ ok := object.Key("startUrl")
+ ok.String(*v.StartUrl)
+ }
+
+ return nil
+}
+
+func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/enums.go
new file mode 100644
index 000000000..b14a3c058
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/enums.go
@@ -0,0 +1,44 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+type AccessDeniedExceptionReason string
+
+// Enum values for AccessDeniedExceptionReason
+const (
+ AccessDeniedExceptionReasonKmsAccessDenied AccessDeniedExceptionReason = "KMS_AccessDeniedException"
+)
+
+// Values returns all known values for AccessDeniedExceptionReason. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (AccessDeniedExceptionReason) Values() []AccessDeniedExceptionReason {
+ return []AccessDeniedExceptionReason{
+ "KMS_AccessDeniedException",
+ }
+}
+
+type InvalidRequestExceptionReason string
+
+// Enum values for InvalidRequestExceptionReason
+const (
+ InvalidRequestExceptionReasonKmsKeyNotFound InvalidRequestExceptionReason = "KMS_NotFoundException"
+ InvalidRequestExceptionReasonKmsInvalidKeyUsage InvalidRequestExceptionReason = "KMS_InvalidKeyUsageException"
+ InvalidRequestExceptionReasonKmsInvalidState InvalidRequestExceptionReason = "KMS_InvalidStateException"
+ InvalidRequestExceptionReasonKmsDisabledKey InvalidRequestExceptionReason = "KMS_DisabledException"
+)
+
+// Values returns all known values for InvalidRequestExceptionReason. Note that
+// this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InvalidRequestExceptionReason) Values() []InvalidRequestExceptionReason {
+ return []InvalidRequestExceptionReason{
+ "KMS_NotFoundException",
+ "KMS_InvalidKeyUsageException",
+ "KMS_InvalidStateException",
+ "KMS_DisabledException",
+ }
+}
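+
+// Usage sketch (illustrative only; isKmsKeyDisabled is not part of the
+// generated code): reason codes are plain string types, so callers compare
+// them directly against the declared constants.
+func isKmsKeyDisabled(r InvalidRequestExceptionReason) bool {
+	return r == InvalidRequestExceptionReasonKmsDisabledKey
+}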
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
new file mode 100644
index 000000000..a1a3c7ef0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
@@ -0,0 +1,430 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// You do not have sufficient access to perform this action.
+type AccessDeniedException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Reason AccessDeniedExceptionReason
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *AccessDeniedException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *AccessDeniedException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *AccessDeniedException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "AccessDeniedException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+type AuthorizationPendingException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *AuthorizationPendingException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *AuthorizationPendingException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *AuthorizationPendingException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "AuthorizationPendingException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the token issued by the service is expired and is no longer
+// valid.
+type ExpiredTokenException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ExpiredTokenException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExpiredTokenException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ExpiredTokenException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ExpiredTokenException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that an error from the service occurred while trying to process a
+// request.
+type InternalServerException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InternalServerException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InternalServerException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InternalServerException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InternalServerException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
+
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+type InvalidClientException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidClientException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidClientException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidClientException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidClientException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the client information sent in the request during registration
+// is invalid.
+type InvalidClientMetadataException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidClientMetadataException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidClientMetadataException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidClientMetadataException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidClientMetadataException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateToken request with an invalid grant type.
+type InvalidGrantException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidGrantException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidGrantException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidGrantException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidGrantException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that one or more redirect URIs in the request are not supported for
+// this operation.
+type InvalidRedirectUriException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRedirectUriException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRedirectUriException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRedirectUriException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that something is wrong with the input to the request. For example, a
+// required parameter might be missing or out of range.
+type InvalidRequestException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Reason InvalidRequestExceptionReason
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRequestException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequestException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that a token provided as input to the request was issued by, and is
+// only usable when calling, IAM Identity Center endpoints in another region.
+type InvalidRequestRegionException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+ Endpoint *string
+ Region *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestRegionException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestRegionException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRequestRegionException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequestRegionException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the scope provided in the request is invalid.
+type InvalidScopeException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidScopeException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidScopeException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidScopeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidScopeException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the client is making requests more frequently than the service
+// can handle.
+type SlowDownException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *SlowDownException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *SlowDownException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *SlowDownException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "SlowDownException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the client is not currently authorized to make the request. This
+// can happen when a clientId is not issued for a public client.
+type UnauthorizedClientException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *UnauthorizedClientException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnauthorizedClientException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *UnauthorizedClientException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "UnauthorizedClientException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Indicates that the grant type in the request is not supported by the service.
+type UnsupportedGrantTypeException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *UnsupportedGrantTypeException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *UnsupportedGrantTypeException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *UnsupportedGrantTypeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "UnsupportedGrantTypeException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
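+
+// Error-handling sketch (illustrative): callers typically unwrap these
+// modeled errors with the standard library's errors.As rather than matching
+// on message strings, e.g.
+//
+//	var slowDown *types.SlowDownException
+//	if errors.As(err, &slowDown) {
+//		// back off before retrying the operation
+//	}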
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go
new file mode 100644
index 000000000..de15e8f05
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go
@@ -0,0 +1,25 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ smithydocument "github.com/aws/smithy-go/document"
+)
+
+// This structure contains Amazon Web Services-specific parameter extensions and
+// the [identity context].
+//
+// [identity context]: https://docs.aws.amazon.com/singlesignon/latest/userguide/trustedidentitypropagation-overview.html
+type AwsAdditionalDetails struct {
+
+	// The trusted context assertion is signed and encrypted by STS. It provides
+	// access to the sts:identity_context claim in the idToken without JWT parsing.
+ //
+ // Identity context comprises information that Amazon Web Services services use to
+ // make authorization decisions when they receive requests.
+ IdentityContext *string
+
+ noSmithyDocumentSerde
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go
new file mode 100644
index 000000000..9c17e4c8e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go
@@ -0,0 +1,184 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpCreateToken struct {
+}
+
+func (*validateOpCreateToken) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateTokenInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateTokenInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateTokenWithIAM struct {
+}
+
+func (*validateOpCreateTokenWithIAM) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateTokenWithIAMInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateTokenWithIAMInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpRegisterClient struct {
+}
+
+func (*validateOpRegisterClient) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*RegisterClientInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpRegisterClientInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpStartDeviceAuthorization struct {
+}
+
+func (*validateOpStartDeviceAuthorization) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*StartDeviceAuthorizationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpStartDeviceAuthorizationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After)
+}
+
+func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After)
+}
+
+func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After)
+}
+
+func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After)
+}
+
+func validateOpCreateTokenInput(v *CreateTokenInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"}
+ if v.ClientId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+ }
+ if v.ClientSecret == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientSecret"))
+ }
+ if v.GrantType == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GrantType"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"}
+ if v.ClientId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+ }
+ if v.GrantType == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GrantType"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpRegisterClientInput(v *RegisterClientInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"}
+ if v.ClientName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientName"))
+ }
+ if v.ClientType == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientType"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"}
+ if v.ClientId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+ }
+ if v.ClientSecret == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientSecret"))
+ }
+ if v.StartUrl == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StartUrl"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
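+
+// Validation sketch (illustrative only; exampleValidate and the literal
+// values are not part of the generated code): required-field checks run in
+// the Initialize step, so a request missing ClientId fails locally before
+// any HTTP call is made.
+func exampleValidate() error {
+	secret := "secret"
+	grant := "urn:ietf:params:oauth:grant-type:device_code"
+	return validateOpCreateTokenInput(&CreateTokenInput{
+		ClientSecret: &secret,
+		GrantType:    &grant,
+	}) // returns an InvalidParamsError naming the missing ClientId
+}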
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
new file mode 100644
index 000000000..9ae35f449
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -0,0 +1,733 @@
+# v1.39.1 (2025-11-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.23.2 which should convey some passive reduction of overall allocations, especially when not using the metrics system.
+
+# v1.39.0 (2025-10-30)
+
+* **Feature**: Update endpoint ruleset parameters casing
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.9 (2025-10-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.8 (2025-10-22)
+
+* No change notes available for this release.
+
+# v1.38.7 (2025-10-16)
+
+* **Dependency Update**: Bump minimum Go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.6 (2025-09-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.5 (2025-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.4 (2025-09-10)
+
+* No change notes available for this release.
+
+# v1.38.3 (2025-09-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.2 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.1 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.0 (2025-08-21)
+
+* **Feature**: Remove incorrect endpoint tests
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.1 (2025-08-20)
+
+* **Bug Fix**: Remove unused deserialization code.
+
+# v1.37.0 (2025-08-11)
+
+* **Feature**: Add support for configuring per-service Options via callback on global config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.0 (2025-08-04)
+
+* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.1 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.0 (2025-06-17)
+
+* **Feature**: The AWS Security Token Service APIs AssumeRoleWithSAML and AssumeRoleWithWebIdentity can now be invoked without pre-configured AWS credentials in the SDK configuration.
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.21 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.20 (2025-06-06)
+
+* No change notes available for this release.
+
+# v1.33.19 (2025-04-10)
+
+* No change notes available for this release.
+
+# v1.33.18 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.33.17 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.33.16 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.15 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.14 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.13 (2025-02-04)
+
+* No change notes available for this release.
+
+# v1.33.12 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.11 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.10 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.33.9 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.33.8 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.7 (2025-01-14)
+
+* No change notes available for this release.
+
+# v1.33.6 (2025-01-10)
+
+* **Documentation**: Fixed typos in the descriptions.
+
+# v1.33.5 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.4 (2025-01-08)
+
+* No change notes available for this release.
+
+# v1.33.3 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.2 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.1 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.0 (2024-11-14)
+
+* **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks.
+
+# v1.32.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.31.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.31.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.31.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.31.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.8 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.30.7 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.30.6 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.5 (2024-08-22)
+
+* No change notes available for this release.
+
+# v1.30.4 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.29.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.13 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.12 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.10 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.28.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.28.6 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-03-04)
+
+* **Bug Fix**: Update internal/presigned-url dependency for corrected API name.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.7 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.26.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.26.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication.
+
+# v1.26.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Documentation**: Documentation updates for AWS Security Token Service.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.25.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2023-11-17)
+
+* **Documentation**: API updates for the AWS Security Token Service
+
+# v1.25.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-10-02)
+
+* **Feature**: STS API updates for AssumeRole
+
+# v1.22.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removal of non-unique regional endpoints, fixes for FIPS and DualStack endpoints, and making region optional in SDK::Endpoint. Includes an additional breakfix to a cognito-sync field.
+
+# v1.21.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.21.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate the existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.1 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-07-25)
+
+* **Feature**: API updates for the AWS Security Token Service
+
+# v1.19.3 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.2 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.19.1 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-05-08)
+
+* **Feature**: Documentation updates for AWS Security Token Service.
+
+# v1.18.11 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.18.10 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.18.8 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.18.4 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+
+# v1.18.2 (2023-01-25)
+
+* **Documentation**: Doc only change to update wording in a key topic
+
+# v1.18.1 (2023-01-23)
+
+* No change notes available for this release.
+
+# v1.18.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.17.7 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-11-22)
+
+* No change notes available for this release.
+
+# v1.17.4 (2022-11-17)
+
+* **Documentation**: Documentation updates for AWS Security Token Service.
+
+# v1.17.3 (2022-11-16)
+
+* No change notes available for this release.
+
+# v1.17.2 (2022-11-10)
+
+* No change notes available for this release.
+
+# v1.17.1 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-10-21)
+
+* **Feature**: Add presign functionality for the sts:AssumeRole operation
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.19 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.18 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.17 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2022-08-30)
+
+* No change notes available for this release.
+
+# v1.16.14 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2022-05-16)
+
+* **Documentation**: Documentation updates for AWS Security Token Service.
+
+# v1.16.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: Updated service client model to latest release.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new `Adaptive` retry mode, an experimental mode that applies client-side rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for details and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2021-12-21)
+
+* **Feature**: Updated to latest service endpoints
+
+# v1.11.1 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-11-30)
+
+* **Feature**: API client updated
+
+# v1.10.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-12)
+
+* **Feature**: Service clients now support custom endpoints that have an initial URI path defined.
+
+# v1.9.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-10-21)
+
+* **Feature**: API client updated
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.2 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-07-15)
+
+* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
+* **Documentation**: Updated service model to latest revision.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-06-25)
+
+* **Feature**: API client updated
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-05-14)
+
+* **Feature**: A constant has been added to each module to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
new file mode 100644
index 000000000..6658babc9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -0,0 +1,1171 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+ acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+ presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithydocument "github.com/aws/smithy-go/document"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net"
+ "net/http"
+ "sync/atomic"
+ "time"
+)
+
+const ServiceID = "STS"
+const ServiceAPIVersion = "2011-06-15"
+
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
+}
+
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
+
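+// timeOperationMetric runs fn, records the elapsed wall-clock time in seconds
+// against the named operation histogram, and passes fn's results through.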
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
+
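+// startMetricTimer begins timing the named metric and returns a stop function
+// that records the elapsed seconds exactly once; later calls are no-ops.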
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
+
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ }
+}
+
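+// withOperationMetadata tags each recorded measurement with the service ID and
+// operation name carried in the context.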
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
+
+type operationMetricsKey struct{}
+
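+// withOperationMetrics creates the per-call duration histograms on the provided
+// MeterProvider and stores them in the returned context for later retrieval via
+// getOperationMetrics.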
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
+}
+
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
+
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
+
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts")
+}
+
+// Client provides the API client to make operation calls to the AWS Security
+// Token Service.
+type Client struct {
+ options Options
+
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
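+//
+// A minimal, illustrative construction (the region value and credentials
+// provider are placeholders, not defaults):
+//
+//	client := sts.New(sts.Options{
+//		Region:      "us-east-1",
+//		Credentials: credentialsProvider, // any aws.CredentialsProvider
+//	})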
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ setResolvedDefaultsMode(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveEndpointResolverV2(&options)
+
+ resolveTracerProvider(&options)
+
+ resolveMeterProvider(&options)
+
+ resolveAuthSchemeResolver(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
+ }
+
+ initializeTimeOffsetResolver(client)
+
+ return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
+}
+
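+// invokeOperation assembles the middleware stack for a single API call, applies
+// per-operation option overrides, wires metrics and tracing around the handler,
+// and wraps any failure in a *smithy.OperationError.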
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeOperationRetryMaxAttempts(&options, *c)
+
+ finalizeClientEndpointResolverOptions(&options)
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
+ if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
+ return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %v", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %v", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+}
+
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+ if len(o.resolvedDefaultsMode) > 0 {
+ return
+ }
+
+ var mode aws.DefaultsMode
+ mode.SetFromString(string(o.DefaultsMode))
+
+ if mode == aws.DefaultsModeAuto {
+ mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+ }
+
+ o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
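+//
+// Typical usage (sketch; configuration loading lives in the separate
+// aws-sdk-go-v2/config module):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client := sts.NewFromConfig(cfg)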
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ DefaultsMode: cfg.DefaultsMode,
+ RuntimeEnvironment: cfg.RuntimeEnvironment,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
+ AuthSchemePreference: cfg.AuthSchemePreference,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSRetryMaxAttempts(cfg, &opts)
+ resolveAWSRetryMode(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ resolveInterceptors(cfg, &opts)
+ resolveUseDualStackEndpoint(cfg, &opts)
+ resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
+ return New(opts, func(o *Options) {
+ for _, opt := range cfg.ServiceOptions {
+ opt(ServiceID, o)
+ }
+ for _, opt := range optFns {
+ opt(o)
+ }
+ })
+}
+
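+// resolveHTTPClient applies defaults-mode connect and TLS-handshake timeouts,
+// but only when the configured HTTP client is the SDK's BuildableClient (or no
+// client was supplied); custom clients are left untouched.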
+func resolveHTTPClient(o *Options) {
+ var buildable *awshttp.BuildableClient
+
+ if o.HTTPClient != nil {
+ var ok bool
+ buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return
+ }
+ } else {
+ buildable = awshttp.NewBuildableClient()
+ }
+
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+ if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+ dialer.Timeout = dialerTimeout
+ }
+ })
+
+ buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+ if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+ transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+ }
+ })
+ }
+
+ o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+
+ if len(o.RetryMode) == 0 {
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ o.RetryMode = modeConfig.RetryMode
+ }
+ }
+ if len(o.RetryMode) == 0 {
+ o.RetryMode = aws.RetryModeStandard
+ }
+
+ var standardOptions []func(*retry.StandardOptions)
+ if v := o.RetryMaxAttempts; v != 0 {
+ standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+ so.MaxAttempts = v
+ })
+ }
+
+ switch o.RetryMode {
+ case aws.RetryModeAdaptive:
+ var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+ if len(standardOptions) != 0 {
+ adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+ ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+ })
+ }
+ o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+ default:
+ o.Retryer = retry.NewStandard(standardOptions...)
+ }
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+ if len(cfg.RetryMode) == 0 {
+ return
+ }
+ o.RetryMode = cfg.RetryMode
+}
+
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+ if cfg.RetryMaxAttempts == 0 {
+ return
+ }
+ o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
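+// finalizeRetryMaxAttempts caps the resolved Retryer at the configured
+// RetryMaxAttempts once all functional options have been applied.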
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
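+// finalizeOperationRetryMaxAttempts re-caps the Retryer for a single operation,
+// but only when the per-operation override differs from the client default.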
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+ if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func resolveInterceptors(cfg aws.Config, o *Options) {
+ o.Interceptors = cfg.Interceptors.Copy()
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
+}
+
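+// getOrAddRequestUserAgent returns the stack's shared RequestUserAgent build
+// middleware, installing it on first use so all callers decorate one instance.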
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
+}
+
+// resolveUseDualStackEndpoint resolves dual-stack endpoint configuration from the shared config sources.
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseDualStackEndpoint = value
+ }
+ return nil
+}
+
+// resolveUseFIPSEndpoint resolves FIPS endpoint configuration from the shared config sources.
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseFIPSEndpoint = value
+ }
+ return nil
+}
+
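+// resolveAccountID surfaces the credential's account ID for endpoint resolution
+// unless account-ID-based endpoint routing has been disabled.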
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
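+// addTimeOffsetBuild registers the clock-skew middleware on both the Build and
+// Deserialize steps so the shared offset is applied to outgoing requests and
+// refreshed from server response times.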
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
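+// setCredentialSourceMiddleware records the chain of credential provider
+// sources in the request User-Agent so credential usage can be attributed.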
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+}
+
+// HTTPPresignerV4 represents presigner interface used by presign url client
+type HTTPPresignerV4 interface {
+ PresignHTTP(
+ ctx context.Context, credentials aws.Credentials, r *http.Request,
+ payloadHash string, service string, region string, signingTime time.Time,
+ optFns ...func(*v4.SignerOptions),
+ ) (url string, signedHeader http.Header, err error)
+}
+
+// PresignOptions represents the presign client options
+type PresignOptions struct {
+
+ // ClientOptions are list of functional options to mutate client options used by
+ // the presign client.
+ ClientOptions []func(*Options)
+
+ // Presigner is the presigner used by the presign url client
+ Presigner HTTPPresignerV4
+}
+
+func (o PresignOptions) copy() PresignOptions {
+ clientOptions := make([]func(*Options), len(o.ClientOptions))
+ copy(clientOptions, o.ClientOptions)
+ o.ClientOptions = clientOptions
+ return o
+}
+
+// WithPresignClientFromClientOptions is a helper utility to retrieve a function
+// that takes PresignOption as input
+func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) {
+ return withPresignClientFromClientOptions(optFns).options
+}
+
+type withPresignClientFromClientOptions []func(*Options)
+
+func (w withPresignClientFromClientOptions) options(o *PresignOptions) {
+ o.ClientOptions = append(o.ClientOptions, w...)
+}
+
+// PresignClient represents the presign url client
+type PresignClient struct {
+ client *Client
+ options PresignOptions
+}
+
+// NewPresignClient generates a presign client using provided API Client and
+// presign options
+func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient {
+ var options PresignOptions
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ if len(options.ClientOptions) != 0 {
+ c = New(c.options, options.ClientOptions...)
+ }
+
+ if options.Presigner == nil {
+ options.Presigner = newDefaultV4Signer(c.options)
+ }
+
+ return &PresignClient{
+ client: c,
+ options: options,
+ }
+}
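+
+// Usage note (illustrative sketch, not part of the generated code): a presign
+// client wraps an existing API client, and WithPresignClientFromClientOptions
+// lets callers adjust the underlying client options for presigning only:
+//
+// presigner := sts.NewPresignClient(client, sts.WithPresignClientFromClientOptions(func(o *sts.Options) {
+//  o.Region = "us-west-2"
+// }))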
+
+func withNopHTTPClientAPIOption(o *Options) {
+ o.HTTPClient = smithyhttp.NopClient{}
+}
+
+type presignContextPolyfillMiddleware struct {
+}
+
+func (*presignContextPolyfillMiddleware) ID() string {
+ return "presignContextPolyfill"
+}
+
+func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" {
+ if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningName(ctx, sn)
+ }
+ if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningRegion(ctx, sr)
+ }
+ } else if schemeID == "aws.auth#sigv4a" {
+ if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningName(ctx, sn)
+ }
+ if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningRegion(ctx, sr[0])
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+type presignConverter PresignOptions
+
+func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+ if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
+ stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+ }
+ stack.Deserialize.Clear()
+ stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
+ stack.Build.Remove("UserAgent")
+ if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil {
+ return err
+ }
+
+ pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
+ CredentialsProvider: options.Credentials,
+ Presigner: c.Presigner,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ })
+ if _, err := stack.Finalize.Swap("Signing", pmw); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
+ return err
+ }
+ // convert request to a GET request
+ err = query.AddAsGetRequestMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
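+
+// Usage note (illustrative sketch, not part of the generated code): the logger
+// added above is gated by the ClientLogMode bits, which callers can set when
+// building the client:
+//
+// client := sts.NewFromConfig(cfg, func(o *sts.Options) {
+//  o.ClientLogMode = aws.LogRequestWithBody | aws.LogResponseWithBody
+// })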
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{
+ Interceptors: opts.Interceptors.BeforeRetryLoop,
+ }, "Retry", middleware.Before)
+}
+
+func addInterceptAttempt(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{
+ BeforeAttempt: opts.Interceptors.BeforeAttempt,
+ AfterAttempt: opts.Interceptors.AfterAttempt,
+ }, "Retry", middleware.After)
+}
+
+func addInterceptExecution(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&smithyhttp.InterceptExecution{
+ BeforeExecution: opts.Interceptors.BeforeExecution,
+ AfterExecution: opts.Interceptors.AfterExecution,
+ }, middleware.Before)
+}
+
+func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{
+ Interceptors: opts.Interceptors.BeforeSerialization,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{
+ Interceptors: opts.Interceptors.AfterSerialization,
+ }, "OperationSerializer", middleware.After)
+}
+
+func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{
+ Interceptors: opts.Interceptors.BeforeSigning,
+ }, "Signing", middleware.Before)
+}
+
+func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{
+ Interceptors: opts.Interceptors.AfterSigning,
+ }, "Signing", middleware.After)
+}
+
+func addInterceptTransmit(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{
+ BeforeTransmit: opts.Interceptors.BeforeTransmit,
+ AfterTransmit: opts.Interceptors.AfterTransmit,
+ }, middleware.After)
+}
+
+func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{
+ Interceptors: opts.Interceptors.BeforeDeserialization,
+ }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse)
+}
+
+func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{
+ Interceptors: opts.Interceptors.AfterDeserialization,
+ }, "OperationDeserializer", middleware.Before)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
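+
+// Usage note (illustrative sketch, not part of the generated code): the span
+// middlewares above emit through Options.TracerProvider, which defaults to a
+// no-op. One way to wire it to OpenTelemetry is the smithy-go adapter, assumed
+// here to be available to the caller:
+//
+// client := sts.NewFromConfig(cfg, func(o *sts.Options) {
+//  o.TracerProvider = smithyoteltracing.Adapt(otel.GetTracerProvider())
+// })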
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
new file mode 100644
index 000000000..f3a93418f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
@@ -0,0 +1,580 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials that you can use to access
+// Amazon Web Services resources. These temporary credentials consist of an access
+// key ID, a secret access key, and a security token. Typically, you use AssumeRole
+// within your account or for cross-account access. For a comparison of AssumeRole
+// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials] and [Compare STS credentials] in the
+// IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception: You
+// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed session policies to this operation.
+// You can pass a single JSON policy document to use as an inline session policy.
+// You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use
+// as managed session policies. The plaintext that you use for both inline and
+// managed session policies can't exceed 2,048 characters. Passing policies to this
+// operation returns new temporary credentials. The resulting session's permissions
+// are the intersection of the role's identity-based policy and the session
+// policies. You can use the role's temporary credentials in subsequent Amazon Web
+// Services API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// When you create a role, you create two policies: a role trust policy that
+// specifies who can assume the role, and a permissions policy that specifies what
+// can be done with the role. You specify the trusted principal that is allowed to
+// assume the role in the role trust policy.
+//
+// To assume a role from a different account, your Amazon Web Services account
+// must be trusted by the role. The trust relationship is defined in the role's
+// trust policy when the role is created. That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have
+// permissions that are delegated from the account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN of the
+// role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of the
+// following:
+//
+// - Attach a policy to the user that allows the user to call AssumeRole (as long
+// as the role's trust policy trusts the account).
+//
+// - Add the user as a principal directly in the role's trust policy.
+//
+// You can do either because the role’s trust policy acts as an IAM resource-based
+// policy. When a resource-based policy grants access to a principal in the same
+// account, no additional identity-based policy is required. For more information
+// about trust policies and resource-based policies, see [IAM Policies] in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see [Passing Session Tags in STS] in the IAM
+// User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information when
+// you call AssumeRole. This is useful for cross-account scenarios to ensure that
+// the user that assumes the role has been authenticated with an Amazon Web
+// Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication might
+// look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that the
+// MFA device produces.
+//
+// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
+ if params == nil {
+ params = &AssumeRoleInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRoleOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
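+
+// Usage note (illustrative sketch, not part of the generated code): ctx and
+// cfg are assumed to be supplied by the caller, e.g. cfg loaded via
+// config.LoadDefaultConfig from github.com/aws/aws-sdk-go-v2/config.
+//
+// client := sts.NewFromConfig(cfg)
+// out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
+//  RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//  RoleSessionName: aws.String("example-session"),
+//  DurationSeconds: aws.Int32(3600),
+// })
+// if err == nil {
+//  _ = out.Credentials // temporary AccessKeyId, SecretAccessKey, SessionToken
+// }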
+
+type AssumeRoleInput struct {
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // This member is required.
+ RoleArn *string
+
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role is
+ // assumed by different principals or for different reasons. In cross-account
+// scenarios, the role session name is visible to, and can be logged by, the account
+ // that owns the role. The role session name is also used in the ARN of the assumed
+ // role principal. This means that subsequent cross-account API requests that use
+ // the temporary security credentials will expose the role session name to the
+ // external account in their CloudTrail logs.
+ //
+ // For security purposes, administrators can view this field in [CloudTrail logs] to help identify
+ // who performed an action in Amazon Web Services. Your administrator might require
+ // that you specify your user name as the session name when you assume the role.
+// For more information, see [sts:RoleSessionName].
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds
+ // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname
+ //
+ // This member is required.
+ RoleSessionName *string
+
+ // The duration, in seconds, of the role session. The value specified can range
+ // from 900 seconds (15 minutes) up to the maximum session duration set for the
+ // role. The maximum session duration setting can have a value from 1 hour to 12
+ // hours. If you specify a value higher than this setting or the administrator
+ // setting (whichever is lower), the operation fails. For example, if you specify a
+ // session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails.
+ //
+ // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API
+ // role session to a maximum of one hour. When you use the AssumeRole API
+ // operation to assume a role, you can specify the duration of your role session
+ // with the DurationSeconds parameter. You can specify a parameter value of up to
+ // 43200 seconds (12 hours), depending on the maximum session duration setting for
+ // your role. However, if you assume a role using role chaining and provide a
+ // DurationSeconds parameter value greater than one hour, the operation fails. To
+ // learn how to view the maximum value for your role, see [Update the maximum session duration for a role].
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+// information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console] in the IAM User Guide.
+ //
+ // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+ DurationSeconds *int32
+
+ // A unique identifier that might be required when you assume a role in another
+ // account. If the administrator of the account to which the role belongs provided
+ // you with an external ID, then provide that value in the ExternalId parameter.
+ // This value can be any string, such as a passphrase or account number. A
+ // cross-account role is usually set up to trust everyone in an account. Therefore,
+ // the administrator of the trusting account might send an external ID to the
+ // administrator of the trusted account. That way, only someone with the ID can
+ // assume the role, rather than everyone in the account. For more information about
+// the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party] in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@:/-
+ //
+ // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html
+ ExternalId *string
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use the
+ // role's temporary credentials in subsequent Amazon Web Services API calls to
+ // access resources in the account that owns the role. You cannot use session
+ // policies to grant more permissions than those allowed by the identity-based
+// policy of the role that is being assumed. For more information, see [Session Policies] in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // For more information about role session permissions, see [Session policies].
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as managed session policies. The policies must exist in the same account as
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+// can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+// being assumed. For more information, see [Session Policies] in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ PolicyArns []types.PolicyDescriptorType
+
+ // A list of previously acquired trusted context assertions in the format of a
+ // JSON array. The trusted context assertion is signed and encrypted by Amazon Web
+ // Services STS.
+ //
+ // The following is an example of a ProvidedContext value that includes a single
+ // trusted context assertion and the ARN of the context provider from which the
+ // trusted context assertion was generated.
+ //
+ // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
+ ProvidedContexts []types.ProvidedContext
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy of
+ // the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as
+ // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as
+ // arn:aws:iam::123456789012:mfa/user ).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ SerialNumber *string
+
+ // The source identity specified by the principal that is calling the AssumeRole
+// operation. The source identity value persists across [chained role] sessions.
+ //
+ // You can require users to specify a source identity when they assume a role. You
+// do this by using the [sts:SourceIdentity] condition key in a role trust policy.
+ // You can use source identity information in CloudTrail logs to determine who took
+ // actions with a role. You can use the aws:SourceIdentity condition key to
+ // further control access to Amazon Web Services resources based on the value of
+// source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] in the
+ // IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: +=,.@-. You cannot use a
+ // value that begins with the text aws: . This prefix is reserved for Amazon Web
+ // Services internal use.
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity
+ SourceIdentity *string
+
+ // A list of session tags that you want to pass. Each session tag consists of a
+ // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions]
+ // in the IAM User Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters, and the values can’t exceed 256
+// characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the role. When you do, session tags override a role tag with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This means
+ // that you cannot have separate Department and department tag keys. Assume that
+ // the role has the Department = Marketing tag and you pass the department =
+ // engineering session tag. Department and department are not saved as separate
+ // tags, and the session tag passed in the request takes precedence over the role
+ // tag.
+ //
+ // Additionally, if you used temporary credentials to perform this operation, the
+ // new session inherits any transitive session tags from the calling session. If
+ // you pass a session tag with the same key as an inherited tag, the operation
+ // fails. To view the inherited tags for a session, see the CloudTrail logs. For
+// more information, see [Viewing Session Tags in CloudTrail] in the IAM User Guide.
+ //
+ // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs
+ Tags []types.Tag
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA. (In other words, if the policy includes a condition that
+// tests for MFA.) If the role being assumed requires MFA and if the TokenCode
+ // value is missing or expired, the AssumeRole call returns an "access denied"
+ // error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string
+
+ // A list of keys for session tags that you want to set as transitive. If you set
+// a tag key as transitive, the corresponding key and value pass to subsequent
+// sessions in a role chain. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+ //
+ // This parameter is optional. The transitive status of a session tag does not
+ // impact its packed binary size.
+ //
+ // If you choose not to specify a transitive tag key, then no tags are passed from
+ // this session to any subsequent sessions.
+ //
+ // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+ TransitiveTagKeys []string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the response to a successful AssumeRole request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type AssumeRoleOutput struct {
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials. For
+ // example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the
+ // RoleSessionName that you specified when you called AssumeRole .
+ AssumedRoleUser *types.AssumedRoleUser
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+// A percentage value that indicates the combined packed size of the session
+// policies and session tags passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // The source identity specified by the principal that is calling the AssumeRole
+ // operation.
+ //
+ // You can require users to specify a source identity when they assume a role. You
+ // do this by using the sts:SourceIdentity condition key in a role trust policy.
+ // You can use source identity information in CloudTrail logs to determine who took
+ // actions with a role. You can use the aws:SourceIdentity condition key to
+ // further control access to Amazon Web Services resources based on the value of
+// source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] in the
+ // IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ SourceIdentity *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpAssumeRoleValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "AssumeRole",
+ }
+}
+
+// PresignAssumeRole is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &AssumeRoleInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns,
+ c.client.addOperationAssumeRoleMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
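+
+// Usage note (illustrative sketch, not part of the generated code; cfg and ctx
+// are assumed to be supplied by the caller):
+//
+// presigner := sts.NewPresignClient(sts.NewFromConfig(cfg))
+// req, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
+//  RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//  RoleSessionName: aws.String("example-session"),
+// })
+//
+// On success, req.URL, req.Method, and req.SignedHeader describe the presigned
+// request.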
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
new file mode 100644
index 000000000..9dcceec12
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
@@ -0,0 +1,488 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials for users who have been
+// authenticated via a SAML authentication response. This operation provides a
+// mechanism for tying an enterprise identity store or directory to role-based
+// Amazon Web Services access without user-specific credentials or configuration.
+// For a comparison of AssumeRoleWithSAML with the other API operations that
+// produce temporary credentials, see [Requesting Temporary Security Credentials] and [Compare STS credentials] in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of an
+// access key ID, a secret access key, and a security token. Applications can use
+// these temporary security credentials to sign calls to Amazon Web Services
+// services.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML
+// authentication response's SessionNotOnOrAfter value, whichever is shorter. You
+// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the
+// maximum session duration setting for the role. This setting can have a value
+// from 1 hour to 12 hours. To learn how to view the maximum value for your role,
+// see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console URL.
+// For more information, see [Using IAM Roles] in the IAM User Guide.
+//
+// [Role chaining] limits your CLI or Amazon Web Services API role session to a maximum of one
+// hour. When you use the AssumeRole API operation to assume a role, you can
+// specify the duration of your role session with the DurationSeconds parameter.
+// You can specify a parameter value of up to 43200 seconds (12 hours), depending
+// on the maximum session duration setting for your role. However, if you assume a
+// role using role chaining and provide a DurationSeconds parameter value greater
+// than one hour, the operation fails.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used to
+// make API calls to any Amazon Web Services service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
+// security credentials. The identity of the caller is validated by using keys in
+// the metadata document that is uploaded for the SAML provider entity for your
+// identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The
+// entry includes the value in the NameID element of the SAML assertion. We
+// recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the
+// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ).
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML
+// assertion as session tags. Each session tag consists of a key name and an
+// associated value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User
+// Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, session tags override the role's tags with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # SAML Configuration
+//
+// Before your application can call AssumeRoleWithSAML , you must configure your
+// SAML identity provider (IdP) to issue the claims required by Amazon Web
+// Services. Additionally, you must use Identity and Access Management (IAM) to
+// create a SAML provider entity in your Amazon Web Services account that
+// represents your identity provider. You must also create an IAM role that
+// specifies this SAML provider in its trust policy.
+//
+// For more information, see the following resources:
+//
+//   - [About SAML 2.0-based Federation] in the IAM User Guide.
+//
+//   - [Creating SAML Identity Providers] in the IAM User Guide.
+//
+//   - [Configuring a Relying Party and Claims] in the IAM User Guide.
+//
+//   - [Creating a Role for SAML 2.0 Federation] in the IAM User Guide.
+//
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html
+// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) {
+ if params == nil {
+ params = &AssumeRoleWithSAMLInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRoleWithSAMLOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
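+
+// Usage note (illustrative sketch, not part of the generated code): the call
+// is unsigned, so the client needs no credentials; samlAssertion is assumed to
+// hold the base64-encoded response from the IdP.
+//
+// client := sts.NewFromConfig(cfg)
+// out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
+//  PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
+//  RoleArn:       aws.String("arn:aws:iam::123456789012:role/example"),
+//  SAMLAssertion: aws.String(samlAssertion),
+// })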
+
+type AssumeRoleWithSAMLInput struct {
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the
+ // IdP.
+ //
+ // This member is required.
+ PrincipalArn *string
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // This member is required.
+ RoleArn *string
+
+// The base64-encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide.
+ //
+ // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html
+ //
+ // This member is required.
+ SAMLAssertion *string
+
+ // The duration, in seconds, of the role session. Your role session lasts for the
+ // duration that you specify for the DurationSeconds parameter, or until the time
+ // specified in the SAML authentication response's SessionNotOnOrAfter value,
+ // whichever is shorter. You can provide a DurationSeconds value from 900 seconds
+ // (15 minutes) up to the maximum session duration setting for the role. This
+ // setting can have a value from 1 hour to 12 hours. If you specify a value higher
+ // than this setting, the operation fails. For example, if you specify a session
+ // duration of 12 hours, but your administrator set the maximum session duration to
+ // 6 hours, your operation fails. To learn how to view the maximum value for your
+// role, see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+// information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console] in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+ DurationSeconds *int32
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use the
+ // role's temporary credentials in subsequent Amazon Web Services API calls to
+ // access resources in the account that owns the role. You cannot use session
+ // policies to grant more permissions than those allowed by the identity-based
+// policy of the role that is being assumed. For more information, see [Session Policies] in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // For more information about role session permissions, see [Session policies].
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as managed session policies. The policies must exist in the same account as
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+// can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies] in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ PolicyArns []types.PolicyDescriptorType
+
+ noSmithyDocumentSerde
+}
+
+// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type AssumeRoleWithSAMLOutput struct {
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *types.AssumedRoleUser
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element of
+ // the SAML assertion.
+ Audience *string
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string
+
+ // A hash value based on the concatenation of the following:
+ //
+ // - The Issuer response value.
+ //
+ // - The Amazon Web Services account ID.
+ //
+ // - The friendly name (the last part of the ARN) of the SAML provider in IAM.
+ //
+ // The combination of NameQualifier and Subject can be used to uniquely identify a
+ // user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
+ NameQualifier *string
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // The value in the SourceIdentity attribute in the SAML assertion. The source
+ // identity value persists across [chained role] sessions.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with that
+ // user. After the source identity is set, the value cannot be changed. It is
+ // present in the request for all actions that are taken by the role and persists
+ // across [chained role] sessions. You can configure your SAML identity provider to use an
+ // attribute associated with your users, like user name or email, as the source
+ // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to
+ // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles] in
+ // the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ SourceIdentity *string
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient or
+ // persistent .
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format ,
+ // that prefix is removed. For example,
+ // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient .
+ // If the format includes any other prefix, the format is returned with no
+ // modifications.
+ SubjectType *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "AssumeRoleWithSAML",
+ }
+}
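For context, a minimal sketch of an AssumeRoleWithSAML call built from the input and output shapes above (not part of the generated file). The ARNs and the base64-encoded assertion are placeholders; because the operation is unsigned, the client needs only a region, not AWS credentials of its own.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// The SAML assertion itself authenticates the request, so no credentials
	// provider is configured on the client.
	client := sts.New(sts.Options{Region: "us-east-1"})

	out, err := client.AssumeRoleWithSAML(context.TODO(), &sts.AssumeRoleWithSAMLInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/saml-demo"),          // placeholder
		PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/MySAMLIdP"), // placeholder
		SAMLAssertion:   aws.String("PHNhbWxwOlJlc3BvbnNlPi4uLg=="),                      // placeholder, base64-encoded
		DurationSeconds: aws.Int32(3600), // up to the role's maximum session duration
	})
	if err != nil {
		log.Fatal(err)
	}
	// NameQualifier plus Subject uniquely identifies the federated user, per
	// the hash formula documented above.
	fmt.Println("user:", aws.ToString(out.NameQualifier), aws.ToString(out.Subject))
	fmt.Println("expires:", out.Credentials.Expiration)
}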
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
new file mode 100644
index 000000000..5975a0cde
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
@@ -0,0 +1,508 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials for users who have been
+// authenticated in a mobile or web application with a web identity provider.
+// Example providers include the OAuth 2.0 providers Login with Amazon and
+// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities].
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can use
+// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide] and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also
+// supply the user with a consistent identity throughout the lifetime of an
+// application.
+//
+// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in the Amazon Cognito Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application (for
+// example, on mobile devices) that requests temporary security credentials without
+// including long-term Amazon Web Services credentials in the application. You also
+// don't need to deploy server-based proxy services that use long-term Amazon Web
+// Services credentials. Instead, the identity of the caller is validated by using
+// a token from the web identity provider. For a comparison of
+// AssumeRoleWithWebIdentity with the other API operations that produce temporary
+// credentials, see [Requesting Temporary Security Credentials] and [Compare STS credentials] in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service API
+// operations.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by
+// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional
+// DurationSeconds parameter to specify the duration of your session. You can
+// provide a value from 900 seconds (15 minutes) up to the maximum session duration
+// setting for the role. This setting can have a value from 1 hour to 12 hours. To
+// learn how to view the maximum value for your role, see [Update the maximum session duration for a role] in the IAM User Guide.
+// The maximum session duration limit applies when you use the AssumeRole* API
+// operations or the assume-role* CLI commands. However, the limit does not apply
+// when you use those operations to create a console URL. For more information, see
+// [Using IAM Roles] in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can be
+// used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken API
+// operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
+// identity-based policy of the role that is being assumed. For more information,
+// see [Session Policies] in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
+// separate limit. Your request can fail for this limit even if your plaintext
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, the session tag overrides the role tag with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags] in the IAM User Guide.
+//
+// # Identities
+//
+// Before your application can call AssumeRoleWithWebIdentity , you must have an
+// identity token from a supported identity provider and create a role that the
+// application can assume. The role that your application assumes must trust the
+// identity provider that is associated with the identity token. In other words,
+// the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
+// logs. The entry includes the [Subject] of the provided web identity token. We recommend
+// that you avoid using any personally identifiable information (PII) in this
+// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification].
+//
+// For more information about how to use OIDC federation and the
+// AssumeRoleWithWebIdentity API, see the following resources:
+//
+//   - [Using Web Identity Federation API Operations for Mobile Apps] and [Federation Through a Web-based Identity Provider].
+//
+//   - [Amazon Web Services SDK for iOS Developer Guide] and [Amazon Web Services SDK for Android Developer Guide]. These
+//     toolkits contain sample apps that show how to invoke the identity
+//     providers. The toolkits then show how to use the information from these
+//     providers to get and use temporary security credentials.
+//
+// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/
+// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration
+// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html
+// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
+func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) {
+ if params == nil {
+ params = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRoleWithWebIdentityOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles].
+ // The trust policies of these roles must accept the cognito-identity.amazonaws.com
+ // service principal and must contain the cognito-identity.amazonaws.com:aud
+ // condition key to restrict role assumption to users from your intended identity
+ // pools. A policy that trusts Amazon Cognito identity pools without this condition
+ // creates a risk that a user from an unintended identity pool can assume the role.
+ // For more information, see [Trust policies for IAM roles in Basic (Classic) authentication] in the Amazon Cognito Developer Guide.
+ //
+ // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html
+ // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies
+ //
+ // This member is required.
+ RoleArn *string
+
+ // An identifier for the assumed role session. Typically, you pass the name or
+ // identifier that is associated with the user who is using your application. That
+ // way, the temporary security credentials that your application will use are
+ // associated with that user. This session name is included as part of the ARN and
+ // assumed role ID in the AssumedRoleUser response element.
+ //
+ // For security purposes, administrators can view this field in [CloudTrail logs] to help identify
+ // who performed an action in Amazon Web Services. Your administrator might require
+ // that you specify your user name as the session name when you assume the role.
+ // For more information, see [sts:RoleSessionName].
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds
+ // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname
+ //
+ // This member is required.
+ RoleSessionName *string
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the
+ // identity provider. Your application must get this token by authenticating the
+ // user who is using your application with a web identity provider before the
+ // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token
+ // must be formatted as either an integer or a long integer. Tokens must be signed
+ // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or
+ // ES512).
+ //
+ // This member is required.
+ WebIdentityToken *string
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify a
+ // session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see [View the Maximum Session Duration Setting for a Role] in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console] in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
+ DurationSeconds *int32
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use the
+ // role's temporary credentials in subsequent Amazon Web Services API calls to
+ // access resources in the account that owns the role. You cannot use session
+ // policies to grant more permissions than those allowed by the identity-based
+ // policy of the role that is being assumed. For more information, see [Session Policies] in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // For more information about role session permissions, see [Session policies].
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as managed session policies. The policies must exist in the same account as
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies] in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ PolicyArns []types.PolicyDescriptorType
+
+ // The fully qualified host component of the domain name of the OAuth 2.0 identity
+ // provider. Do not specify this value for an OpenID Connect identity provider.
+ //
+ // Currently www.amazon.com and graph.facebook.com are the only supported identity
+ // providers for OAuth 2.0 access tokens. Do not include URL schemes and port
+ // numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
+ ProviderId *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type AssumeRoleWithWebIdentityOutput struct {
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials. For
+ // example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the
+ // RoleSessionName that you specified when you called AssumeRole .
+ AssumedRoleUser *types.AssumedRoleUser
+
+ // The intended audience (also known as client ID) of the web identity token. This
+ // is traditionally the client identifier issued to the application that requested
+ // the web identity token.
+ Audience *string
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // The issuing authority of the web identity token presented. For OpenID Connect
+ // ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed in
+ // the AssumeRoleWithWebIdentity request.
+ Provider *string
+
+ // The value of the source identity that is returned in the JSON web token (JWT)
+ // from the identity provider.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with that
+ // user. After the source identity is set, the value cannot be changed. It is
+ // present in the request for all actions that are taken by the role and persists
+ // across [chained role] sessions. You can configure your identity provider to use an attribute
+ // associated with your users, like user name or email, as the source identity when
+ // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON
+ // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools] in the Amazon
+ // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles]
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
+ SourceIdentity *string
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with the
+ // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user
+ // and the application that acquired the WebIdentityToken (pairwise identifier).
+ // For OpenID Connect ID tokens, this field contains the value returned by the
+ // identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "AssumeRoleWithWebIdentity",
+ }
+}
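To make the flow concrete, a minimal sketch of the direct call (not part of the generated file). The role ARN and token path are placeholders; as with the SAML variant, the request is unsigned and the web identity token itself proves the caller's identity.

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Unsigned operation: only a region is required on the client.
	client := sts.New(sts.Options{Region: "us-east-1"})

	// Token minted by the OIDC provider; the path is a placeholder (on
	// Kubernetes this would typically be a projected service account token).
	token, err := os.ReadFile("/var/run/secrets/tokens/oidc-token")
	if err != nil {
		log.Fatal(err)
	}

	out, err := client.AssumeRoleWithWebIdentity(context.TODO(), &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/oidc-demo"), // placeholder
		RoleSessionName:  aws.String("demo-session"),
		WebIdentityToken: aws.String(string(token)),
		DurationSeconds:  aws.Int32(3600), // up to the role's maximum session duration
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed:", aws.ToString(out.AssumedRoleUser.Arn))
	fmt.Println("expires:", out.Credentials.Expiration)
}

For long-running processes, the SDK's credentials/stscreds package wraps this same operation in a self-refreshing provider (stscreds.NewWebIdentityRoleProvider), which is usually preferable to calling it by hand.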
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
new file mode 100644
index 000000000..571f06728
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
@@ -0,0 +1,253 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of short-term credentials that you can use to perform privileged tasks
+// on a member account in your organization.
+//
+// Before you can launch a privileged session, you must have centralized root
+// access in your organization. For steps to enable this feature, see [Centralize root access for member accounts] in the IAM
+// User Guide.
+//
+// The STS global endpoint is not supported for AssumeRoot. You must send this
+// request to a Regional STS endpoint. For more information, see [Endpoints].
+//
+// You can track AssumeRoot in CloudTrail logs to determine what actions were
+// performed in a session. For more information, see [Track privileged tasks in CloudTrail] in the IAM User Guide.
+//
+// [Endpoints]: https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html#sts-endpoints
+// [Track privileged tasks in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-track-privileged-tasks.html
+// [Centralize root access for member accounts]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-enable-root-access.html
+func (c *Client) AssumeRoot(ctx context.Context, params *AssumeRootInput, optFns ...func(*Options)) (*AssumeRootOutput, error) {
+ if params == nil {
+ params = &AssumeRootInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRoot", params, optFns, c.addOperationAssumeRootMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRootOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type AssumeRootInput struct {
+
+ // The member account principal ARN or account ID.
+ //
+ // This member is required.
+ TargetPrincipal *string
+
+ // The identity-based policy that scopes the session to the privileged tasks that
+ // can be performed. You can use one of the following Amazon Web Services managed
+ // policies to scope root session actions.
+ //
+ // [IAMAuditRootUserCredentials]
+ //
+ // [IAMCreateRootUserPassword]
+ //
+ // [IAMDeleteRootUserCredentials]
+ //
+ // [S3UnlockBucketPolicy]
+ //
+ // [SQSUnlockQueuePolicy]
+ //
+ // [IAMDeleteRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMDeleteRootUserCredentials
+ // [IAMCreateRootUserPassword]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMCreateRootUserPassword
+ // [IAMAuditRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMAuditRootUserCredentials
+ // [S3UnlockBucketPolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-S3UnlockBucketPolicy
+ // [SQSUnlockQueuePolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-SQSUnlockQueuePolicy
+ //
+ // This member is required.
+ TaskPolicyArn *types.PolicyDescriptorType
+
+ // The duration, in seconds, of the privileged session. The value can range from 0
+ // seconds up to the maximum session duration of 900 seconds (15 minutes). If you
+ // specify a value higher than this setting, the operation fails.
+ //
+ // By default, the value is set to 900 seconds.
+ DurationSeconds *int32
+
+ noSmithyDocumentSerde
+}
+
+type AssumeRootOutput struct {
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // The source identity specified by the principal that is calling the AssumeRoot
+ // operation.
+ //
+ // You can use the aws:SourceIdentity condition key to control access based on the
+ // value of source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ SourceIdentity *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoot{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoot{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoot"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpAssumeRootValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoot(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoot(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "AssumeRoot",
+ }
+}
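A quick sketch of launching a privileged session (not part of the generated file). It assumes centralized root access is already enabled; the member account ID is a placeholder, and the exact task-policy ARN is an assumption, so verify it against the managed policies listed above.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	// AssumeRoot is signed with the caller's own credentials and must target
	// a Regional endpoint, so a region is set explicitly.
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRoot(context.TODO(), &sts.AssumeRootInput{
		TargetPrincipal: aws.String("111122223333"), // placeholder member account ID
		TaskPolicyArn: &types.PolicyDescriptorType{
			// One of the managed task policies listed above; ARN form assumed.
			Arn: aws.String("arn:aws:iam::aws:policy/root-task/S3UnlockBucketPolicy"),
		},
		DurationSeconds: aws.Int32(900), // sessions are capped at 15 minutes
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expires:", out.Credentials.Expiration)
}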
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
new file mode 100644
index 000000000..786bac89b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
@@ -0,0 +1,225 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Decodes additional information about the authorization status of a request from
+// an encoded message returned in response to an Amazon Web Services request.
+//
+// For example, if a user is not authorized to perform an operation that they
+// requested, the request returns a Client.UnauthorizedOperation response (an
+// HTTP 403 response). Some Amazon Web Services operations additionally return an
+// encoded message that can provide details about this authorization failure.
+//
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether that
+// operation returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// contain privileged information that the user who requested the operation should
+// not see. To decode an authorization status message, a user must be granted
+// permissions through an IAM [policy] to request the DecodeAuthorizationMessage (
+// sts:DecodeAuthorizationMessage ) action.
+//
+// The decoded message includes the following types of information:
+//
+// - Whether the request was denied due to an explicit deny or due to the
+//   absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied] in the IAM User
+// Guide.
+//
+// - The principal who made the request.
+//
+// - The requested action.
+//
+// - The requested resource.
+//
+// - The values of condition keys in the context of the user's request.
+//
+// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow
+// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) {
+ if params == nil {
+ params = &DecodeAuthorizationMessageInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DecodeAuthorizationMessageOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DecodeAuthorizationMessageInput struct {
+
+ // The encoded message that was returned with the response.
+ //
+ // This member is required.
+ EncodedMessage *string
+
+ noSmithyDocumentSerde
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an Amazon
+// Web Services request.
+type DecodeAuthorizationMessageOutput struct {
+
+ // The API returns a response with the decoded message.
+ DecodedMessage *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DecodeAuthorizationMessage",
+ }
+}
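For context, a minimal sketch of decoding such a message (not part of the generated file). The encoded string is a placeholder copied out of a Client.UnauthorizedOperation response, and the caller is assumed to hold sts:DecodeAuthorizationMessage.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Encoded message returned alongside an HTTP 403 authorization failure.
	encoded := "AQoDYXdz..." // placeholder

	out, err := client.DecodeAuthorizationMessage(context.TODO(), &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		log.Fatal(err) // fails unless the caller holds sts:DecodeAuthorizationMessage
	}
	// DecodedMessage is a JSON document describing the denied request.
	fmt.Println(aws.ToString(out.DecodedMessage))
}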
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
new file mode 100644
index 000000000..6c1f87898
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
@@ -0,0 +1,216 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example,
+// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example,
+// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access
+// keys, see [Managing Access Keys for IAM Users] in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs to
+// you, you can sign in as the root user and review your root user access keys.
+// Then, you can pull a [credentials report] to learn which IAM user owns the keys. To learn who
+// requested the temporary credentials for an ASIA access key, view the STS events
+// in your [CloudTrail logs] in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might be
+// active, inactive, or deleted. Active keys might not have permissions to perform
+// an operation. Providing a deleted access key might return an error that the key
+// doesn't exist.
+//
+// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html
+// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html
+// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html
+func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) {
+ if params == nil {
+ params = &GetAccessKeyInfoInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetAccessKeyInfoOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetAccessKeyInfoInput struct {
+
+ // The identifier of an access key.
+ //
+ // This parameter allows (through its regex pattern) a string of characters that
+ // can consist of any upper- or lowercase letter or digit.
+ //
+ // This member is required.
+ AccessKeyId *string
+
+ noSmithyDocumentSerde
+}
+
+type GetAccessKeyInfoOutput struct {
+
+ // The number used to identify the Amazon Web Services account.
+ Account *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetAccessKeyInfo",
+ }
+}
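
A minimal usage sketch for the operation above, kept outside the vendored file: it assumes default credential resolution via config.LoadDefaultConfig and uses the documentation placeholder access key ID from the doc comment.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the usual environment and
	// shared-config sources.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// AKIAIOSFODNN7EXAMPLE is the documentation placeholder key ID.
	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("owning account:", aws.ToString(out.Account))
}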
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
new file mode 100644
index 000000000..7d0653398
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
@@ -0,0 +1,228 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns details about the IAM user or role whose credentials are used to call
+// the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// attaches a policy to your identity that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when access is denied.
+// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice] in the IAM User Guide.
+//
+// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa
+func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) {
+ if params == nil {
+ params = &GetCallerIdentityInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetCallerIdentityOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetCallerIdentityInput struct {
+ noSmithyDocumentSerde
+}
+
+// Contains the response to a successful GetCallerIdentity request, including information about the
+// entity making the request.
+type GetCallerIdentityOutput struct {
+
+ // The Amazon Web Services account ID number of the account that owns or contains
+ // the calling entity.
+ Account *string
+
+ // The Amazon Web Services ARN associated with the calling entity.
+ Arn *string
+
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity that is making the call. The values returned are those listed in
+ // the aws:userid column in the [Principal table] found on the Policy Variables reference page in
+ // the IAM User Guide.
+ //
+ // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable
+ UserId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetCallerIdentity",
+ }
+}
+
+// PresignGetCallerIdentity is used to generate a presigned HTTP Request which
+// contains presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &GetCallerIdentityInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns,
+ c.client.addOperationGetCallerIdentityMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
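
A hedged sketch of the two entry points in this file: the plain GetCallerIdentity call, which needs no IAM permissions, and its presigned variant, whose URL can be handed to another party as proof of identity (a pattern used by token-exchange flows such as EKS authentication). Assumes default config resolution.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Who am I? No permissions are required for this call.
	id, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account=%s arn=%s\n", aws.ToString(id.Account), aws.ToString(id.Arn))

	// Presign the same request instead of sending it; the consumer of the
	// URL executes it to verify the signer's identity.
	presigner := sts.NewPresignClient(client)
	req, err := presigner.PresignGetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("presigned:", req.Method, req.URL)
}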
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
new file mode 100644
index 000000000..1c2f28e51
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
@@ -0,0 +1,429 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary security credentials (consisting of an access key
+// ID, a secret access key, and a security token) for a user. A typical use is in a
+// proxy application that gets temporary security credentials on behalf of
+// distributed applications inside a corporate network.
+//
+// You must call the GetFederationToken operation using the long-term security
+// credentials of an IAM user. As a result, this call is appropriate in contexts
+// where those credentials can be safeguarded, usually in a server-based
+// application. For a comparison of GetFederationToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials] and [Compare STS credentials] in the IAM User Guide.
+//
+// Although it is possible to call GetFederationToken using the security
+// credentials of an Amazon Web Services account root user rather than an IAM user
+// that you create for the purpose of a proxy application, we do not recommend it.
+// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks] in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito] or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// # Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by
+// using the root user credentials have a maximum duration of 3,600 seconds (1
+// hour).
+//
+// # Permissions
+//
+// You can use the temporary credentials created by GetFederationToken in any
+// Amazon Web Services service with the following exceptions:
+//
+// - You cannot call any IAM operations using the CLI or the Amazon Web Services
+// API. This limitation does not apply to console sessions.
+//
+// - You cannot call any STS operations except GetCallerIdentity .
+//
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
+// You must pass an inline or managed [session policy] to this operation. You can pass a single
+// JSON policy document to use as an inline session policy. You can also specify up
+// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+// policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. When you pass
+// session policies, the session permissions are the intersection of the IAM user
+// policies and the session policies that you pass. This gives you a way to further
+// restrict the permissions for a federated user. You cannot use session policies
+// to grant more permissions than those that are defined in the permissions policy
+// of the IAM user. For more information, see [Session Policies] in the IAM User Guide. For
+// information about using GetFederationToken to create temporary security
+// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker].
+//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session in the
+// Principal element of the policy, the session has the permissions allowed by the
+// policy. These permissions are granted in addition to the permissions granted by
+// the session policies.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see [Passing Session Tags in STS] in the IAM User
+// Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control] in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This means
+// that you cannot have separate Department and department tag keys. Assume that
+// the user that you are federating has the Department = Marketing tag and you
+// pass the department = engineering session tag. Department and department are
+// not saved as separate tags, and the session tag passed in the request takes
+// precedence over the user tag.
+//
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito]: http://aws.amazon.com/cognito/
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) {
+ if params == nil {
+ params = &GetFederationTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetFederationTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetFederationTokenInput struct {
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob ). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon S3
+ // bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // This member is required.
+ Name *string
+
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+ // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+ // using root user credentials are restricted to a maximum of 3,600 seconds (one
+ // hour). If the specified duration is longer than one hour, the session obtained
+ // by using root user credentials defaults to one hour.
+ DurationSeconds *int32
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // You must pass an inline or managed [session policy] to this operation. You can pass a single
+ // JSON policy document to use as an inline session policy. You can also specify up
+ // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+ // policies.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions.
+ //
+ // When you pass session policies, the session permissions are the intersection of
+ // the IAM user policies and the session policies that you pass. This gives you a
+ // way to further restrict the permissions for a federated user. You cannot use
+ // session policies to grant more permissions than those that are defined in the
+ // permissions policy of the IAM user. For more information, see [Session Policies] in the IAM User
+ // Guide.
+ //
+ // The resulting credentials can be used to access a resource that has a
+ // resource-based policy. If that policy specifically references the federated user
+ // session in the Principal element of the policy, the session has the permissions
+ // allowed by the policy. These permissions are granted in addition to the
+ // permissions that are granted by the session policies.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
+ // use as a managed session policy. The policies must exist in the same account as
+ // the IAM user that is requesting federated access.
+ //
+ // You must pass an inline or managed [session policy] to this operation. You can pass a single
+ // JSON policy document to use as an inline session policy. You can also specify up
+ // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+ // policies. The plaintext that you use for both inline and managed session
+ // policies can't exceed 2,048 characters. You can provide up to 10 managed policy
+ // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces] in the Amazon Web Services General
+ // Reference.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions.
+ //
+ // When you pass session policies, the session permissions are the intersection of
+ // the IAM user policies and the session policies that you pass. This gives you a
+ // way to further restrict the permissions for a federated user. You cannot use
+ // session policies to grant more permissions than those that are defined in the
+ // permissions policy of the IAM user. For more information, see [Session Policies] in the IAM User
+ // Guide.
+ //
+ // The resulting credentials can be used to access a resource that has a
+ // resource-based policy. If that policy specifically references the federated user
+ // session in the Principal element of the policy, the session has the permissions
+ // allowed by the policy. These permissions are granted in addition to the
+ // permissions that are granted by the session policies.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ PolicyArns []types.PolicyDescriptorType
+
+ // A list of session tags. Each session tag consists of a key name and an
+ // associated value. For more information about session tags, see [Passing Session Tags in STS] in the IAM User
+ // Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters and the values can’t exceed 256
+ // characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User Guide.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the user you are federating. When you do, session tags override a user tag
+ // with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This means
+ // that you cannot have separate Department and department tag keys. Assume that
+ // the user that you are federating has the Department = Marketing tag and you
+ // pass the department = engineering session tag. Department and department are
+ // not saved as separate tags, and the session tag passed in the request takes
+ // precedence over the user tag.
+ //
+ // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ Tags []types.Tag
+
+ noSmithyDocumentSerde
+}
+
+// Contains the response to a successful GetFederationToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type GetFederationTokenOutput struct {
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // Identifiers for the federated user associated with the credentials (such as
+ // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use
+ // the federated user's ARN in your resource-based policies, such as an Amazon S3
+ // bucket policy.
+ FederatedUser *types.FederatedUser
+
+ // A percentage value that indicates the packed size of the session policies and
+ // session tags combined passed in the request. The request fails if the packed
+ // size is greater than 100 percent, which means the policies and tags exceeded the
+ // allowed space.
+ PackedPolicySize *int32
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetFederationToken",
+ }
+}
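
A broker-style sketch under stated assumptions: long-term IAM user credentials are resolvable from the environment, and the inline policy, federated user name, and session tag are illustrative values only (without a session policy the federated session would have no permissions).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // must yield long-term IAM user credentials
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Illustrative inline session policy; the effective permissions are the
	// intersection of this policy and the calling user's policies.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`

	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"), // the doc comment's example federated user
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int32(3600),
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Marketing")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("federated user:", aws.ToString(out.FederatedUser.Arn))
	fmt.Println("expires:", aws.ToTime(out.Credentials.Expiration))
}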
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
new file mode 100644
index 000000000..256046990
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
@@ -0,0 +1,275 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of temporary credentials for an Amazon Web Services account or
+// IAM user. The credentials consist of an access key ID, a secret access key, and
+// a security token. Typically, you use GetSessionToken if you want to use MFA to
+// protect programmatic calls to specific Amazon Web Services API operations like
+// Amazon EC2 StopInstances .
+//
+// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is
+// associated with their MFA device. Using the temporary security credentials that
+// the call returns, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. An incorrect MFA code causes the API to return
+// an access denied error. For a comparison of GetSessionToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials] and [Compare STS credentials] in the IAM User Guide.
+//
+// No permissions are required for users to perform this operation. The purpose of
+// the sts:GetSessionToken operation is to authenticate the user using MFA. You
+// cannot use policies to control authentication operations. For more information,
+// see [Permissions for GetSessionToken] in the IAM User Guide.
+//
+// # Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon Web
+// Services security credentials of an IAM user. Credentials that are created by
+// IAM users are valid for the duration that you specify. This duration can range
+// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours),
+// with a default of 43,200 seconds (12 hours). Credentials based on account
+// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1
+// hour), with a default of 1 hour.
+//
+// # Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used to
+// make API calls to any Amazon Web Services service with the following exceptions:
+//
+// - You cannot call any IAM API operations unless MFA authentication
+// information is included in the request.
+//
+// - You cannot call any STS API except AssumeRole or GetCallerIdentity .
+//
+// The credentials that GetSessionToken returns are based on permissions
+// associated with the IAM user whose credentials were used to call the operation.
+// The temporary credentials have the same permissions as the IAM user.
+//
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do not
+// recommend it. If GetSessionToken is called using root user credentials, the
+// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks] in
+// the IAM User Guide.
+//
+// For more information about using GetSessionToken to create temporary
+// credentials, see [Temporary Credentials for Users in Untrusted Environments] in the IAM User Guide.
+//
+// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html
+// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) {
+ if params == nil {
+ params = &GetSessionTokenInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetSessionTokenOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetSessionTokenInput struct {
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+ // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for
+ // Amazon Web Services account owners are restricted to a maximum of 3,600 seconds
+ // (one hour). If the duration is longer than one hour, the session for Amazon Web
+ // Services account owners defaults to one hour.
+ DurationSeconds *int32
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM user
+ // has a policy that requires MFA authentication. The value is either the serial
+ // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name
+ // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You
+ // can find the device for an IAM user by going to the Amazon Web Services
+ // Management Console and viewing the user's security credentials.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string
+
+ // The value provided by the MFA device, if MFA is required. If any policy
+ // requires the IAM user to submit an MFA code, specify this value. If MFA
+ // authentication is required, the user must provide a code when requesting a set
+ // of temporary security credentials. A user who fails to provide the code receives
+ // an "access denied" response when requesting resources that require MFA
+ // authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the response to a successful GetSessionToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
+type GetSessionTokenOutput struct {
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetSessionToken",
+ }
+}
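
A sketch of the MFA-protected path described above, assuming long-term IAM user credentials in the environment; the serial number and token code are the placeholder values from the doc comment and would come from the caller's MFA device in practice.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx) // long-term IAM user credentials
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(900), // 15 minutes, the documented minimum
		// Placeholder MFA details; only needed when a policy requires MFA.
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"),
		TokenCode:    aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary key:", aws.ToString(out.Credentials.AccessKeyId))
}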
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
new file mode 100644
index 000000000..2a81b3fb1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
@@ -0,0 +1,351 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "slices"
+ "strings"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "sts")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ sorted := sortAuthOptions(options, m.options.AuthSchemePreference)
+ for _, option := range sorted {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option {
+ byPriority := make([]*smithyauth.Option, 0, len(options))
+ for _, prefName := range preferred {
+ for _, option := range options {
+ optName := option.SchemeID
+ if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 {
+ optName = parts[1]
+ }
+ if prefName == optName {
+ byPriority = append(byPriority, option)
+ }
+ }
+ }
+ for _, option := range options {
+ if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool {
+ return o.SchemeID == option.SchemeID
+ }) {
+ byPriority = append(byPriority, option)
+ }
+ }
+ return byPriority
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
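
resolveAuthSchemeMiddleware above picks the first candidate whose scheme can supply an identity, after sortAuthOptions orders the candidates by the client's AuthSchemePreference (a bare name such as sigv4 is matched against the suffix of scheme IDs such as aws.auth#sigv4). A minimal sketch of steering that ordering at construction time, assuming the AuthSchemePreference field referenced in the code above:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Bare scheme names; sortAuthOptions compares them against the part of
	// each option's SchemeID after the "#".
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.AuthSchemePreference = []string{"sigv4"}
	})
	_ = client // use the client as normal; auth resolution now prefers sigv4
}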
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
new file mode 100644
index 000000000..a1ac917ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
@@ -0,0 +1,2710 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ smithy "github.com/aws/smithy-go"
+ smithyxml "github.com/aws/smithy-go/encoding/xml"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "strconv"
+ "strings"
+)
+
+type awsAwsquery_deserializeOpAssumeRole struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRole) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata)
+ }
+ output := &AssumeRoleOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRoleResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
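+// awsAwsquery_deserializeOpErrorAssumeRole maps a non-2xx awsquery response
+// to the modeled exception matching its error code; unrecognized codes fall
+// through to a generic smithy.GenericAPIError carrying the raw code and
+// message.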
+func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("MalformedPolicyDocument", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpAssumeRoleWithSAML struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata)
+ }
+ output := &AssumeRoleWithSAMLOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRoleWithSAMLResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("IDPRejectedClaim", errorCode):
+ return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody)
+
+ case strings.EqualFold("InvalidIdentityToken", errorCode):
+ return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody)
+
+ case strings.EqualFold("MalformedPolicyDocument", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata)
+ }
+ output := &AssumeRoleWithWebIdentityOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("IDPCommunicationError", errorCode):
+ return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody)
+
+ case strings.EqualFold("IDPRejectedClaim", errorCode):
+ return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody)
+
+ case strings.EqualFold("InvalidIdentityToken", errorCode):
+ return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody)
+
+ case strings.EqualFold("MalformedPolicyDocument", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpAssumeRoot struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoot) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoot(response, &metadata)
+ }
+ output := &AssumeRootOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRootResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRootOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct {
+}
+
+func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata)
+ }
+ output := &DecodeAuthorizationMessageOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("DecodeAuthorizationMessageResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("InvalidAuthorizationMessageException", errorCode):
+ return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetAccessKeyInfo struct {
+}
+
+func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata)
+ }
+ output := &GetAccessKeyInfoOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetAccessKeyInfoResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetCallerIdentity struct {
+}
+
+func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata)
+ }
+ output := &GetCallerIdentityOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetCallerIdentityResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetFederationToken struct {
+}
+
+func (*awsAwsquery_deserializeOpGetFederationToken) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, &metadata)
+ }
+ output := &GetFederationTokenOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetFederationTokenResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("MalformedPolicyDocument", errorCode):
+ return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody)
+
+ case strings.EqualFold("PackedPolicyTooLarge", errorCode):
+ return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsquery_deserializeOpGetSessionToken struct {
+}
+
+func (*awsAwsquery_deserializeOpGetSessionToken) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata)
+ }
+ output := &GetSessionTokenOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("GetSessionTokenResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
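+// The per-exception deserializers below share one shape: locate the <Error>
+// element in the buffered body and decode its fields into the modeled
+// exception type, returning a DeserializationError (with a body snapshot)
+// on any XML failure.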
+func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.ExpiredTokenException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.IDPCommunicationErrorException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.IDPRejectedClaimException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidAuthorizationMessageException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidIdentityTokenException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.MalformedPolicyDocumentException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.PackedPolicyTooLargeException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
+func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.RegionDisabledException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(errorBody, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return output
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("Error")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ return output
+}
+
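+// The document deserializers below walk child elements via a NodeDecoder,
+// matching tag names case-insensitively and skipping unrecognized elements
+// so that new response fields do not break existing clients.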
+func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.AssumedRoleUser
+ if *v == nil {
+ sv = &types.AssumedRoleUser{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Arn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Arn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("AssumedRoleId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AssumedRoleId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.Credentials
+ if *v == nil {
+ sv = &types.Credentials{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AccessKeyId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.AccessKeyId = ptr.String(xtv)
+ }
+
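+		// Expiration arrives as an ISO 8601 date-time string and is parsed
+		// with smithytime.ParseDateTime before being stored.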
+ case strings.EqualFold("Expiration", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ t, err := smithytime.ParseDateTime(xtv)
+ if err != nil {
+ return err
+ }
+ sv.Expiration = ptr.Time(t)
+ }
+
+ case strings.EqualFold("SecretAccessKey", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SecretAccessKey = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SessionToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SessionToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.ExpiredTokenException
+ if *v == nil {
+ sv = &types.ExpiredTokenException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.FederatedUser
+ if *v == nil {
+ sv = &types.FederatedUser{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Arn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Arn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("FederatedUserId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.FederatedUserId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IDPCommunicationErrorException
+ if *v == nil {
+ sv = &types.IDPCommunicationErrorException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.IDPRejectedClaimException
+ if *v == nil {
+ sv = &types.IDPRejectedClaimException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidAuthorizationMessageException
+ if *v == nil {
+ sv = &types.InvalidAuthorizationMessageException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.InvalidIdentityTokenException
+ if *v == nil {
+ sv = &types.InvalidIdentityTokenException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.MalformedPolicyDocumentException
+ if *v == nil {
+ sv = &types.MalformedPolicyDocumentException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.PackedPolicyTooLargeException
+ if *v == nil {
+ sv = &types.PackedPolicyTooLargeException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *types.RegionDisabledException
+ if *v == nil {
+ sv = &types.RegionDisabledException{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("message", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Message = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
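+// The operation output deserializers decode the contents of the per-operation
+// <...Result> wrapper element extracted by the corresponding
+// HandleDeserialize middleware above.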
+func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRoleOutput
+ if *v == nil {
+ sv = &AssumeRoleOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
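+		// PackedPolicySize is transmitted as decimal text; parse it as a
+		// 64-bit integer, then narrow it to the modeled int32 field.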
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("SourceIdentity", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SourceIdentity = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRoleWithSAMLOutput
+ if *v == nil {
+ sv = &AssumeRoleWithSAMLOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Audience", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Audience = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Issuer", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Issuer = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("NameQualifier", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.NameQualifier = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("SourceIdentity", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SourceIdentity = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Subject", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Subject = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SubjectType", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SubjectType = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRoleWithWebIdentityOutput
+ if *v == nil {
+ sv = &AssumeRoleWithWebIdentityOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("AssumedRoleUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("Audience", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Audience = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ case strings.EqualFold("Provider", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Provider = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SourceIdentity", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SourceIdentity = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SubjectFromWebIdentityToken = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRootOutput
+ if *v == nil {
+ sv = &AssumeRootOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SourceIdentity", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SourceIdentity = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *DecodeAuthorizationMessageOutput
+ if *v == nil {
+ sv = &DecodeAuthorizationMessageOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("DecodedMessage", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.DecodedMessage = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetAccessKeyInfoOutput
+ if *v == nil {
+ sv = &GetAccessKeyInfoOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Account", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Account = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetCallerIdentityOutput
+ if *v == nil {
+ sv = &GetCallerIdentityOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Account", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Account = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("Arn", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.Arn = ptr.String(xtv)
+ }
+
+ case strings.EqualFold("UserId", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.UserId = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetFederationTokenOutput
+ if *v == nil {
+ sv = &GetFederationTokenOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("FederatedUser", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("PackedPolicySize", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ i64, err := strconv.ParseInt(xtv, 10, 64)
+ if err != nil {
+ return err
+ }
+ sv.PackedPolicySize = ptr.Int32(int32(i64))
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *GetSessionTokenOutput
+ if *v == nil {
+ sv = &GetSessionTokenOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
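Every deserializer in this generated file follows the same token-walking shape: fetch a child token, wrap the decoder around it, read leaf values with Value, recurse into nested documents with a fresh WrapNodeDecoder, and Skip anything unrecognized. Below is a distilled, hedged sketch of that shape, assuming the smithy-go XML helpers used throughout this file (including FetchRootElement for the root element); the result type and sample payload are hypothetical, not part of the patch.

    package main

    import (
    	"encoding/xml"
    	"fmt"
    	"strings"

    	smithyxml "github.com/aws/smithy-go/encoding/xml"
    	"github.com/aws/smithy-go/ptr"
    )

    // result stands in for a generated output/error shape with one leaf field.
    type result struct{ Message *string }

    func decodeResult(v *result, decoder smithyxml.NodeDecoder) error {
    	for {
    		t, done, err := decoder.Token()
    		if err != nil {
    			return err
    		}
    		if done { // matching end element of the wrapped node reached
    			break
    		}
    		originalDecoder := decoder
    		decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
    		switch {
    		case strings.EqualFold("message", t.Name.Local):
    			val, err := decoder.Value() // reads text and consumes the end tag
    			if err != nil {
    				return err
    			}
    			if val != nil {
    				v.Message = ptr.String(string(val))
    			}
    		default:
    			// Skip consumes through the end tag of the unexpected element.
    			if err := decoder.Decoder.Skip(); err != nil {
    				return err
    			}
    		}
    		decoder = originalDecoder
    	}
    	return nil
    }

    func main() {
    	body := strings.NewReader(`<Error><Message>boom</Message><Extra>x</Extra></Error>`)
    	root := xml.NewDecoder(body)
    	start, err := smithyxml.FetchRootElement(root)
    	if err != nil {
    		panic(err)
    	}
    	var r result
    	if err := decodeResult(&r, smithyxml.WrapNodeDecoder(root, start)); err != nil {
    		panic(err)
    	}
    	fmt.Println(*r.Message) // boom
    }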
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
new file mode 100644
index 000000000..cbb19c7f6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
@@ -0,0 +1,13 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sts provides the API client, operations, and parameter types for AWS
+// Security Token Service.
+//
+// # Security Token Service
+//
+// Security Token Service (STS) enables you to request temporary,
+// limited-privilege credentials for users. This guide provides descriptions of the
+// STS API. For more information about using this service, see [Temporary Security Credentials].
+//
+// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html
+package sts
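For orientation, a minimal caller of this vendored package might look like the sketch below. It is a hedged usage example rather than code from the patch; GetCallerIdentity and its Account/Arn output fields appear in the deserializers above, and the config loader is the standard aws-sdk-go-v2 entry point.

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/sts"
    )

    func main() {
    	ctx := context.Background()
    	// Reads credentials/region from the environment and shared config files.
    	cfg, err := config.LoadDefaultConfig(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	client := sts.NewFromConfig(cfg)
    	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("account:", *out.Account, "arn:", *out.Arn)
    }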
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
new file mode 100644
index 000000000..945682e1a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
@@ -0,0 +1,1139 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ eo := m.Options
+ eo.Logger = middleware.GetLogger(ctx)
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+ if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "sts"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
+// If the awsResolver returns an aws.EndpointNotFoundError, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+ var resolver aws.EndpointResolverWithOptions
+
+ if awsResolverWithOptions != nil {
+ resolver = awsResolverWithOptions
+ } else if awsResolver != nil {
+ resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+ }
+
+ return &wrappedEndpointResolver{
+ awsResolver: resolver,
+ }
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+ options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+ if len(options.EndpointOptions.ResolvedRegion) == 0 {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(options.Region, fipsInfix) ||
+ strings.Contains(options.Region, fipsPrefix) ||
+ strings.Contains(options.Region, fipsSuffix) {
+ options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+ options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+ options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ }
+ }
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+
+	// Whether the global endpoint should be used, rather than the regional endpoint
+ // for us-east-1.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::STS::UseGlobalEndpoint
+ UseGlobalEndpoint *bool
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ if p.UseGlobalEndpoint == nil {
+ return fmt.Errorf("parameter UseGlobalEndpoint is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+
+ if p.UseGlobalEndpoint == nil {
+ p.UseGlobalEndpoint = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _ = _UseDualStack
+ _UseFIPS := *params.UseFIPS
+ _ = _UseFIPS
+ _UseGlobalEndpoint := *params.UseGlobalEndpoint
+ _ = _UseGlobalEndpoint
+
+ if _UseGlobalEndpoint == true {
+ if !(params.Endpoint != nil) {
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if _Region == "ap-northeast-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ap-south-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ap-southeast-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ap-southeast-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "aws-global" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ca-central-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-central-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-north-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-west-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-west-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-west-3" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "sa-east-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-east-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-east-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-west-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-west-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.Name == "aws-us-gov" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ if _Region == "aws-global" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
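Two endpoint-override paths are wired up in the file above: a static BaseEndpoint (bound into EndpointParameters as SDK::Endpoint, and also settable through the AWS_ENDPOINT_URL_STS environment variable) and a replaceable EndpointResolverV2. The following is a hedged sketch of both, using a hypothetical host; note the rules engine rejects FIPS/dual-stack combined with a custom endpoint, as the branch above shows.

    package main

    import (
    	"context"
    	"log"
    	"net/url"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/sts"
    	smithyendpoints "github.com/aws/smithy-go/endpoints"
    )

    // staticResolver always returns one endpoint, e.g. a local STS stub.
    type staticResolver struct{ endpoint string }

    func (r staticResolver) ResolveEndpoint(ctx context.Context, params sts.EndpointParameters) (smithyendpoints.Endpoint, error) {
    	u, err := url.Parse(r.endpoint)
    	if err != nil {
    		return smithyendpoints.Endpoint{}, err
    	}
    	return smithyendpoints.Endpoint{URI: *u}, nil
    }

    func newClients(cfg aws.Config) (*sts.Client, *sts.Client) {
    	// Path 1: static base endpoint; the rules engine still runs and
    	// treats this value as the SDK::Endpoint parameter.
    	a := sts.NewFromConfig(cfg, func(o *sts.Options) {
    		o.BaseEndpoint = aws.String("https://sts.example.internal") // hypothetical host
    	})
    	// Path 2: replace endpoint resolution entirely.
    	b := sts.NewFromConfig(cfg, func(o *sts.Options) {
    		o.EndpointResolverV2 = staticResolver{endpoint: "https://sts.example.internal"}
    	})
    	return a, b
    }

    func main() {
    	cfg, err := config.LoadDefaultConfig(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	a, b := newClients(cfg)
    	_, _ = a, b
    }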
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
new file mode 100644
index 000000000..935307771
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
@@ -0,0 +1,43 @@
+{
+ "dependencies": {
+ "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+ "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5",
+ "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7",
+ "github.com/aws/smithy-go": "v1.4.0"
+ },
+ "files": [
+ "api_client.go",
+ "api_client_test.go",
+ "api_op_AssumeRole.go",
+ "api_op_AssumeRoleWithSAML.go",
+ "api_op_AssumeRoleWithWebIdentity.go",
+ "api_op_AssumeRoot.go",
+ "api_op_DecodeAuthorizationMessage.go",
+ "api_op_GetAccessKeyInfo.go",
+ "api_op_GetCallerIdentity.go",
+ "api_op_GetFederationToken.go",
+ "api_op_GetSessionToken.go",
+ "auth.go",
+ "deserializers.go",
+ "doc.go",
+ "endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
+ "generated.json",
+ "internal/endpoints/endpoints.go",
+ "internal/endpoints/endpoints_test.go",
+ "options.go",
+ "protocol_test.go",
+ "serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
+ "types/errors.go",
+ "types/types.go",
+ "validators.go"
+ ],
+ "go": "1.23",
+ "module": "github.com/aws/aws-sdk-go-v2/service/sts",
+ "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
new file mode 100644
index 000000000..a4dbe82e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package sts
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.39.1"
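The internal endpoints file added next backs the legacy v1 resolver exposed as NewDefaultEndpointResolver in endpoints.go. As a hedged sketch of how a FIPS variant resolves against the aws-us-gov partition table below (the expected URL is an assumption derived from that table's hostname entry):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/sts"
    )

    func main() {
    	resolver := sts.NewDefaultEndpointResolver()
    	ep, err := resolver.ResolveEndpoint("us-gov-east-1", sts.EndpointResolverOptions{
    		UseFIPSEndpoint: aws.FIPSEndpointStateEnabled,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Expected per the partition table below: https://sts.us-gov-east-1.amazonaws.com
    	fmt.Println(ep.URL)
    }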
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..1ec1ecf65
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -0,0 +1,566 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+ "github.com/aws/smithy-go/logging"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the
+ // provided logger.
+ LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+ // using the value passed to the ResolveEndpoint method. This value is used by the
+ // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+ // name. You must not set this value directly in your application.
+ ResolvedRegion string
+
+ // DisableHTTPS informs the resolver to return an endpoint that does not use the
+ // HTTPS scheme.
+ DisableHTTPS bool
+
+ // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+ return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+ return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+ return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+ return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+ return endpoints.Options{
+ Logger: options.Logger,
+ LogDeprecated: options.LogDeprecated,
+ ResolvedRegion: options.ResolvedRegion,
+ DisableHTTPS: options.DisableHTTPS,
+ UseDualStackEndpoint: options.UseDualStackEndpoint,
+ UseFIPSEndpoint: options.UseFIPSEndpoint,
+ }
+}
+
+// Resolver STS endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := transformToSharedOptions(options)
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
+
+var partitionRegexp = struct {
+ Aws *regexp.Regexp
+ AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
+ AwsIso *regexp.Regexp
+ AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
+ AwsUsGov *regexp.Regexp
+}{
+
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
+ AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
+ AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+ AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "sts.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "sts-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.Aws,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-east-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-6",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "aws-global",
+ }: endpoints.Endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-north-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "me-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "sa-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2-fips",
+ }: endpoints.Endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2-fips",
+ }: endpoints.Endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "sts.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsCn,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIso,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-iso-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-iso-west-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.sc2s.sgov.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoB,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-isob-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isob-west-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "eu-isoe-west-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-isof-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isof-south-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "sts.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "sts-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsUsGov,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts.us-gov-east-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "sts.us-gov-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts.us-gov-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "sts.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ },
+ },
+}
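
The partition metadata above maps each region key to its standard, FIPS, and dual-stack hostnames, with the legacy *-fips pseudo-regions marked Deprecated. As a minimal sketch (assuming the aws-sdk-go-v2 config module is also available to the consumer, which this diff does not show), enabling the FIPS endpoint state is what steers resolution to the sts-fips.* entries:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Enabling the FIPS endpoint state makes endpoint resolution select the
	// FIPSVariant entries from the partition metadata above, e.g.
	// sts-fips.us-east-1.amazonaws.com for us-east-1.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
	)
	if err != nil {
		log.Fatal(err)
	}

	out, err := sts.NewFromConfig(cfg).GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("caller ARN: %s", aws.ToString(out.Arn))
}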
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
new file mode 100644
index 000000000..f60b7d338
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
@@ -0,0 +1,239 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on an operation
+	// call to modify this list for per-operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+	// client's initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+	// Deprecated: EndpointResolver and WithEndpointResolver are deprecated. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client
+	// makes when calling an operation that fails with a retryable error. A value
+	// of 0 is ignored and will not be used to configure the client's default
+	// retryer or modify a per-operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if
+	// the Retryer option is not also specified.
+	//
+	// When creating a new API client, this member is only used if the Retryer
+	// Options member is nil. This value is ignored if Retryer is not nil.
+	//
+	// Per-operation call overrides are not currently supported; they may be later.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+	// The initial DefaultsMode used when the client options were constructed. If
+	// the DefaultsMode was set to aws.DefaultsModeAuto, this stores the resolved
+	// value at that point in time.
+	//
+	// Per-operation call overrides are not currently supported; they may be later.
+ resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default
+	// HTTP implementation if nil.
+ HTTPClient HTTPClient
+
+ // Client registry of operation interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ to.Interceptors = o.Interceptors.Copy()
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver are deprecated. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
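
Options is consumed through functional setters, both at client construction and per operation call. A minimal sketch of both styles follows; the AppID value is a hypothetical identifier, not something set anywhere in this diff:

package stsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func newClient(cfg aws.Config) (*sts.Client, error) {
	// Construction-time overrides: options set here apply to every operation
	// invoked through this client.
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.RetryMaxAttempts = 5
		o.AppID = "stash-example" // hypothetical User-Agent app identifier
	})

	// Per-operation overrides: the same functional options can be passed to a
	// single call; this call runs with a tighter retry budget.
	_, err := client.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{},
		func(o *sts.Options) {
			o.RetryMaxAttempts = 1
		})
	return client, err
}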
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
new file mode 100644
index 000000000..96b222136
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
@@ -0,0 +1,1005 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/query"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "path"
+)
+
+type awsAwsquery_serializeOpAssumeRole struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRole) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRoleInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRole")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoleWithSAML struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRoleWithSAMLInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRoleWithSAML")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRoleWithWebIdentity")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoot struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoot) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRootInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRoot")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRootInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpDecodeAuthorizationMessage struct {
+}
+
+func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DecodeAuthorizationMessageInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("DecodeAuthorizationMessage")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetAccessKeyInfo struct {
+}
+
+func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetAccessKeyInfoInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetAccessKeyInfo")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetCallerIdentity struct {
+}
+
+func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetCallerIdentityInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetCallerIdentity")
+ body.Key("Version").String("2011-06-15")
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetFederationToken struct {
+}
+
+func (*awsAwsquery_serializeOpGetFederationToken) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetFederationTokenInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetFederationToken")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpGetSessionToken struct {
+}
+
+func (*awsAwsquery_serializeOpGetSessionToken) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*GetSessionTokenInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("GetSessionToken")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error {
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.Arn != nil {
+ objectKey := object.Key("arn")
+ objectKey.String(*v.Arn)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.ContextAssertion != nil {
+ objectKey := object.Key("ContextAssertion")
+ objectKey.String(*v.ContextAssertion)
+ }
+
+ if v.ProviderArn != nil {
+ objectKey := object.Key("ProviderArn")
+ objectKey.String(*v.ProviderArn)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error {
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.Key != nil {
+ objectKey := object.Key("Key")
+ objectKey.String(*v.Key)
+ }
+
+ if v.Value != nil {
+ objectKey := object.Key("Value")
+ objectKey.String(*v.Value)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error {
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error {
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.ExternalId != nil {
+ objectKey := object.Key("ExternalId")
+ objectKey.String(*v.ExternalId)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvidedContexts != nil {
+ objectKey := object.Key("ProvidedContexts")
+ if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.RoleArn != nil {
+ objectKey := object.Key("RoleArn")
+ objectKey.String(*v.RoleArn)
+ }
+
+ if v.RoleSessionName != nil {
+ objectKey := object.Key("RoleSessionName")
+ objectKey.String(*v.RoleSessionName)
+ }
+
+ if v.SerialNumber != nil {
+ objectKey := object.Key("SerialNumber")
+ objectKey.String(*v.SerialNumber)
+ }
+
+ if v.SourceIdentity != nil {
+ objectKey := object.Key("SourceIdentity")
+ objectKey.String(*v.SourceIdentity)
+ }
+
+ if v.Tags != nil {
+ objectKey := object.Key("Tags")
+ if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.TokenCode != nil {
+ objectKey := object.Key("TokenCode")
+ objectKey.String(*v.TokenCode)
+ }
+
+ if v.TransitiveTagKeys != nil {
+ objectKey := object.Key("TransitiveTagKeys")
+ if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.PrincipalArn != nil {
+ objectKey := object.Key("PrincipalArn")
+ objectKey.String(*v.PrincipalArn)
+ }
+
+ if v.RoleArn != nil {
+ objectKey := object.Key("RoleArn")
+ objectKey.String(*v.RoleArn)
+ }
+
+ if v.SAMLAssertion != nil {
+ objectKey := object.Key("SAMLAssertion")
+ objectKey.String(*v.SAMLAssertion)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.ProviderId != nil {
+ objectKey := object.Key("ProviderId")
+ objectKey.String(*v.ProviderId)
+ }
+
+ if v.RoleArn != nil {
+ objectKey := object.Key("RoleArn")
+ objectKey.String(*v.RoleArn)
+ }
+
+ if v.RoleSessionName != nil {
+ objectKey := object.Key("RoleSessionName")
+ objectKey.String(*v.RoleSessionName)
+ }
+
+ if v.WebIdentityToken != nil {
+ objectKey := object.Key("WebIdentityToken")
+ objectKey.String(*v.WebIdentityToken)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.TargetPrincipal != nil {
+ objectKey := object.Key("TargetPrincipal")
+ objectKey.String(*v.TargetPrincipal)
+ }
+
+ if v.TaskPolicyArn != nil {
+ objectKey := object.Key("TaskPolicyArn")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorType(v.TaskPolicyArn, objectKey); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.EncodedMessage != nil {
+ objectKey := object.Key("EncodedMessage")
+ objectKey.String(*v.EncodedMessage)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.AccessKeyId != nil {
+ objectKey := object.Key("AccessKeyId")
+ objectKey.String(*v.AccessKeyId)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.Name != nil {
+ objectKey := object.Key("Name")
+ objectKey.String(*v.Name)
+ }
+
+ if v.Policy != nil {
+ objectKey := object.Key("Policy")
+ objectKey.String(*v.Policy)
+ }
+
+ if v.PolicyArns != nil {
+ objectKey := object.Key("PolicyArns")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil {
+ return err
+ }
+ }
+
+ if v.Tags != nil {
+ objectKey := object.Key("Tags")
+ if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.SerialNumber != nil {
+ objectKey := object.Key("SerialNumber")
+ objectKey.String(*v.SerialNumber)
+ }
+
+ if v.TokenCode != nil {
+ objectKey := object.Key("TokenCode")
+ objectKey.String(*v.TokenCode)
+ }
+
+ return nil
+}
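
Every serializer above emits the same awsquery shape: a POST to "/" with Content-Type application/x-www-form-urlencoded and a body that always carries the Action and Version keys plus the flattened input members. A standalone sketch of the approximate body for AssumeRole (the ARN, session name, and tag are placeholder values):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Approximation of what awsAwsquery_serializeOpAssumeRole writes; list
	// members are flattened into dotted, 1-indexed "member" keys.
	body := url.Values{}
	body.Set("Action", "AssumeRole")
	body.Set("Version", "2011-06-15")
	body.Set("RoleArn", "arn:aws:iam::123456789012:role/example") // placeholder
	body.Set("RoleSessionName", "example-session")                // placeholder
	body.Set("Tags.member.1.Key", "team")                         // flattened list entry
	body.Set("Tags.member.1.Value", "backup")

	fmt.Println(body.Encode())
	// Action=AssumeRole&RoleArn=...&RoleSessionName=example-session&...&Version=2011-06-15
}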
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
new file mode 100644
index 000000000..041629bba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
@@ -0,0 +1,248 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// The web identity token that was passed is expired or is not valid. Get a new
+// identity token from the identity provider and then retry the request.
+type ExpiredTokenException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ExpiredTokenException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExpiredTokenException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ExpiredTokenException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ExpiredTokenException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request could not be fulfilled because the identity provider (IDP) that was
+// asked to verify the incoming identity token could not be reached. This is often
+// a transient error caused by network conditions. Retry the request a limited
+// number of times so that you don't exceed the request rate. If the error
+// persists, the identity provider might be down or not responding.
+type IDPCommunicationErrorException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *IDPCommunicationErrorException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IDPCommunicationErrorException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *IDPCommunicationErrorException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "IDPCommunicationError"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The identity provider (IdP) reported that authentication failed. This might be
+// because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it can
+// also mean that the claim has expired or has been explicitly revoked.
+type IDPRejectedClaimException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *IDPRejectedClaimException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IDPRejectedClaimException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *IDPRejectedClaimException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "IDPRejectedClaim"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as line
+// breaks, or if the message has expired.
+type InvalidAuthorizationMessageException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidAuthorizationMessageException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidAuthorizationMessageException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidAuthorizationMessageException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidAuthorizationMessageException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// The web identity token that was passed could not be validated by Amazon Web
+// Services. Get a new identity token from the identity provider and then retry the
+// request.
+type InvalidIdentityTokenException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidIdentityTokenException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidIdentityTokenException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidIdentityTokenException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidIdentityToken"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+type MalformedPolicyDocumentException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *MalformedPolicyDocumentException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *MalformedPolicyDocumentException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *MalformedPolicyDocumentException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "MalformedPolicyDocument"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session tags
+// into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper size
+// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide.
+//
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length
+type PackedPolicyTooLargeException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *PackedPolicyTooLargeException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *PackedPolicyTooLargeException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *PackedPolicyTooLargeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "PackedPolicyTooLarge"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM
+// User Guide.
+//
+// [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html
+type RegionDisabledException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *RegionDisabledException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *RegionDisabledException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *RegionDisabledException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "RegionDisabledException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
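
These generated error types satisfy smithy's APIError interface, so callers should unwrap them with errors.As rather than matching strings. A minimal sketch handling the web-identity failure modes defined above:

package stsexample

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func assumeWithWebIdentity(ctx context.Context, client *sts.Client, in *sts.AssumeRoleWithWebIdentityInput) error {
	_, err := client.AssumeRoleWithWebIdentity(ctx, in)

	var expired *types.ExpiredTokenException
	var rejected *types.IDPRejectedClaimException
	switch {
	case errors.As(err, &expired):
		// Client fault: fetch a fresh token from the IdP and retry.
		log.Println("web identity token expired:", expired.ErrorMessage())
	case errors.As(err, &rejected):
		// Client fault: the IdP refused the claim; retrying will not help.
		log.Println("IdP rejected the claim:", rejected.ErrorMessage())
	}
	return err
}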
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
new file mode 100644
index 000000000..dff7a3c2e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
@@ -0,0 +1,144 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ smithydocument "github.com/aws/smithy-go/document"
+ "time"
+)
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+
+ // The ARN of the temporary security credentials that are returned from the AssumeRole
+ // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in
+ // the IAM User Guide.
+ //
+ // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
+ //
+ // This member is required.
+ Arn *string
+
+ // A unique identifier that contains the role ID and the role session name of the
+ // role that is being assumed. The role ID is generated by Amazon Web Services when
+ // the role is created.
+ //
+ // This member is required.
+ AssumedRoleId *string
+
+ noSmithyDocumentSerde
+}
+
+// Amazon Web Services credentials for API authentication.
+type Credentials struct {
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // This member is required.
+ AccessKeyId *string
+
+ // The date on which the current credentials expire.
+ //
+ // This member is required.
+ Expiration *time.Time
+
+ // The secret access key that can be used to sign requests.
+ //
+ // This member is required.
+ SecretAccessKey *string
+
+ // The token that users must pass to the service API to use the temporary
+ // credentials.
+ //
+ // This member is required.
+ SessionToken *string
+
+ noSmithyDocumentSerde
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+
+ // The ARN that specifies the federated user that is associated with the
+ // credentials. For more information about ARNs and how to use them in policies,
+ // see [IAM Identifiers]in the IAM User Guide.
+ //
+ // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
+ //
+ // This member is required.
+ Arn *string
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // This member is required.
+ FederatedUserId *string
+
+ noSmithyDocumentSerde
+}
+
+// A reference to the IAM managed policy that is passed as a session policy for a
+// role session or a federated user session.
+type PolicyDescriptorType struct {
+
+ // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+ // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web
+ // Services General Reference.
+ //
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ Arn *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains information about the provided context. This includes the signed and
+// encrypted trusted context assertion and the context provider ARN from which the
+// trusted context assertion was generated.
+type ProvidedContext struct {
+
+ // The signed and encrypted trusted context assertion generated by the context
+ // provider. The trusted context assertion is signed and encrypted by Amazon Web
+ // Services STS.
+ ContextAssertion *string
+
+ // The context provider ARN from which the trusted context assertion was generated.
+ ProviderArn *string
+
+ noSmithyDocumentSerde
+}
+
+// You can pass custom key-value pair attributes when you assume a role or
+// federate a user. These are called session tags. You can then use the session
+// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions] in the IAM User
+// Guide.
+//
+// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+type Tag struct {
+
+ // The key for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag keys can’t
+	// exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User
+ // Guide.
+ //
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ //
+ // This member is required.
+ Key *string
+
+ // The value for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag values can’t
+	// exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits] in the IAM User
+ // Guide.
+ //
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ //
+ // This member is required.
+ Value *string
+
+ noSmithyDocumentSerde
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
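+
+// Illustrative sketch (not generated code): these types surface on the v2 STS
+// client roughly as below; the role ARN and session name are hypothetical
+// placeholders.
+//
+//	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//		RoleSessionName: aws.String("example-session"),
+//		Tags:            []types.Tag{{Key: aws.String("team"), Value: aws.String("storage")}},
+//	})
+//	// on success, out.Credentials carries AccessKeyId, SecretAccessKey,
+//	// SessionToken and Expiration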
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
new file mode 100644
index 000000000..1026e2211
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
@@ -0,0 +1,347 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpAssumeRole struct {
+}
+
+func (*validateOpAssumeRole) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRoleInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRoleInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoleWithSAML struct {
+}
+
+func (*validateOpAssumeRoleWithSAML) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRoleWithSAMLInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRoleWithSAMLInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoleWithWebIdentity struct {
+}
+
+func (*validateOpAssumeRoleWithWebIdentity) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpAssumeRoot struct {
+}
+
+func (*validateOpAssumeRoot) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRootInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRootInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDecodeAuthorizationMessage struct {
+}
+
+func (*validateOpDecodeAuthorizationMessage) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DecodeAuthorizationMessageInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDecodeAuthorizationMessageInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetAccessKeyInfo struct {
+}
+
+func (*validateOpGetAccessKeyInfo) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetAccessKeyInfoInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetAccessKeyInfoInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetFederationToken struct {
+}
+
+func (*validateOpGetFederationToken) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetFederationTokenInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetFederationTokenInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After)
+}
+
+func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After)
+}
+
+func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After)
+}
+
+func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After)
+}
+
+func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After)
+}
+
+func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After)
+}
+
+func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After)
+}
+
+func validateTag(v *types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tag"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Value == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Value"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagListType(v []types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TagListType"}
+ for i := range v {
+ if err := validateTag(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRoleInput(v *AssumeRoleInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"}
+ if v.RoleArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+ }
+ if v.RoleSessionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName"))
+ }
+ if v.Tags != nil {
+ if err := validateTagListType(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"}
+ if v.RoleArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+ }
+ if v.PrincipalArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn"))
+ }
+ if v.SAMLAssertion == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"}
+ if v.RoleArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleArn"))
+ }
+ if v.RoleSessionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName"))
+ }
+ if v.WebIdentityToken == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpAssumeRootInput(v *AssumeRootInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRootInput"}
+ if v.TargetPrincipal == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetPrincipal"))
+ }
+ if v.TaskPolicyArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TaskPolicyArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"}
+ if v.EncodedMessage == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"}
+ if v.AccessKeyId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"}
+ if v.Name == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Name"))
+ }
+ if v.Tags != nil {
+ if err := validateTagListType(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
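+
+// Illustrative note (not generated code): each operation registers its
+// validator on the Initialize step of the middleware stack, so required-field
+// errors are returned before the request is serialized, e.g.
+//
+//	err := stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After)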
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 776e31b21..c483e0cb8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -442,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config {
return c
}
+// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseFIPSEndpoint(enable bool) *Config {
+ if enable {
+ c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+ } else {
+ c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled
+ }
+ return c
+}
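+
+// Illustrative usage (assumes the standard session package):
+//
+//	sess := session.Must(session.NewSession(
+//		aws.NewConfig().WithRegion("us-east-1").WithUseFIPSEndpoint(true),
+//	))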
+
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
index 785f30d8e..329f788a3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -31,6 +31,8 @@ package endpointcreds
import (
"encoding/json"
+ "fmt"
+ "strings"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -69,7 +71,37 @@ type Provider struct {
// Optional authorization token value if set will be used as the value of
// the Authorization header of the endpoint credential request.
+ //
+	// When constructed from the environment, the provider will use the value of
+	// the AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
+ //
+ // Will be overridden if AuthorizationTokenProvider is configured
AuthorizationToken string
+
+	// Optional auth provider func to dynamically load the auth token from a file
+	// every time credentials are retrieved
+	//
+	// When constructed from the environment, the provider will read and use the
+	// content of the file pointed to by the AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE
+	// environment variable as the auth token every time credentials are retrieved
+ //
+ // Will override AuthorizationToken if configured
+ AuthorizationTokenProvider AuthTokenProvider
+}
+
+// AuthTokenProvider defines an interface to dynamically load a value to be
+// used as the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+ GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing the AuthTokenProvider
+// interface and enables customizing token provider behavior
+type TokenProviderFunc func() (string, error)
+
+// GetToken retrieves the auth token according to the TokenProviderFunc implementation
+func (p TokenProviderFunc) GetToken() (string, error) {
+ return p()
}
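+
+// Illustrative usage (not part of the upstream file): a custom token source
+// can be supplied via TokenProviderFunc, e.g.
+//
+//	provider.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(
+//		func() (string, error) { return "example-token", nil }, // hypothetical token
+//	)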
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
@@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error
req := p.Client.NewRequest(op, nil, out)
req.SetContext(ctx)
req.HTTPRequest.Header.Set("Accept", "application/json")
- if authToken := p.AuthorizationToken; len(authToken) != 0 {
+
+ authToken := p.AuthorizationToken
+ var err error
+ if p.AuthorizationTokenProvider != nil {
+ authToken, err = p.AuthorizationTokenProvider.GetToken()
+ if err != nil {
+ return nil, fmt.Errorf("get authorization token: %v", err)
+ }
+ }
+
+ if strings.ContainsAny(authToken, "\r\n") {
+ return nil, fmt.Errorf("authorization token contains invalid newline sequence")
+ }
+ if len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index e39903284..1ba80b576 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -9,6 +9,7 @@ package defaults
import (
"fmt"
+ "io/ioutil"
"net"
"net/http"
"net/url"
@@ -115,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
const (
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+ 0xFD, 0, 0xE, 0xC2,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0x23,
+}
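+
+// Note: the 16 bytes above are the canonical expansion of "fd00:ec2::23",
+// i.e. fd00:0ec2:0000:0000:0000:0000:0000:0023.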
+
// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
@@ -135,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
var lookupHostFn = net.LookupHost
-func isLoopbackHost(host string) (bool, error) {
- ip := net.ParseIP(host)
- if ip != nil {
- return ip.IsLoopback(), nil
+// isAllowedHost reports whether host is a loopback address or a known
+// ECS/EKS container IP.
+//
+// host can be either an IP address or an unresolved hostname; resolution is
+// performed automatically in the latter case.
+func isAllowedHost(host string) (bool, error) {
+ if ip := net.ParseIP(host); ip != nil {
+ return isIPAllowed(ip), nil
}
- // Host is not an ip, perform lookup
addrs, err := lookupHostFn(host)
if err != nil {
return false, err
}
+
for _, addr := range addrs {
- if !net.ParseIP(addr).IsLoopback() {
+ if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
return false, nil
}
}
@@ -155,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) {
return true, nil
}
+func isIPAllowed(ip net.IP) bool {
+ return ip.IsLoopback() ||
+ ip.Equal(ecsContainerIPv4) ||
+ ip.Equal(eksContainerIPv4) ||
+ ip.Equal(eksContainerIPv6)
+}
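+
+// For example, "127.0.0.1" (loopback), "169.254.170.2" (ECS), "169.254.170.23"
+// and "fd00:ec2::23" (EKS) are allowed; any other address is rejected.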
+
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
var errMsg string
@@ -165,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string)
host := aws.URLHostname(parsed)
if len(host) == 0 {
errMsg = "unable to parse host from local HTTP cred provider URL"
- } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
- errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
- } else if !isLoopback {
- errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+ } else if parsed.Scheme == "http" {
+ if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+ errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
+ } else if !isAllowedHost {
+ errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
+ }
}
}
@@ -190,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+ if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+ p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+ if contents, err := ioutil.ReadFile(authFilePath); err != nil {
+ return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+ } else {
+ return string(contents), nil
+ }
+ })
+ }
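+			// Illustrative environment (values are examples; the token-file path is
+			// the one published for EKS Pod Identity):
+			//   AWS_CONTAINER_CREDENTIALS_FULL_URI=http://169.254.170.23/v1/credentials
+			//   AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token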
},
)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index 604aeffde..f1f9ba4ec 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -2,6 +2,7 @@ package ec2metadata
import (
"fmt"
+ "github.com/aws/aws-sdk-go/aws"
"net/http"
"sync/atomic"
"time"
@@ -65,7 +66,9 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
switch requestFailureError.StatusCode() {
case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
atomic.StoreUint32(&t.disabled, 1)
- t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+ if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) {
+ t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+ }
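+			// Illustrative: the IMDSv1-fallback warning is now logged only when the
+			// client enables deprecated-API debug logging, e.g.
+			// aws.NewConfig().WithLogLevel(aws.LogDebugWithDeprecated).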
case http.StatusBadRequest:
r.Error = requestFailureError
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 2a44182a9..c3516e018 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -31,6 +31,7 @@ const (
ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ CaWest1RegionID = "ca-west-1" // Canada West (Calgary).
EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
EuCentral2RegionID = "eu-central-2" // Europe (Zurich).
EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).
@@ -73,7 +74,9 @@ const (
)
// AWS ISOE (Europe) partition's regions.
-const ()
+const (
+ EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West.
+)
// AWS ISOF partition's regions.
const ()
@@ -190,6 +193,9 @@ var awsPartition = partition{
"ca-central-1": region{
Description: "Canada (Central)",
},
+ "ca-west-1": region{
+ Description: "Canada West (Calgary)",
+ },
"eu-central-1": region{
Description: "Europe (Frankfurt)",
},
@@ -240,13 +246,6 @@ var awsPartition = partition{
},
},
Services: services{
- "a4b": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
"access-analyzer": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -291,6 +290,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -324,6 +332,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -477,6 +494,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "acm-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "acm-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -636,6 +671,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -669,6 +713,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -755,32 +808,69 @@ var awsPartition = partition{
},
},
},
+ "agreement-marketplace": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
"airflow": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -790,6 +880,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -799,6 +898,9 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -815,6 +917,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -873,6 +978,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -931,6 +1039,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -986,18 +1097,33 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -1040,6 +1166,21 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -1247,6 +1388,14 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "api.ecr.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "dkr-us-east-1",
}: endpoint{
@@ -1818,21 +1967,48 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -1910,6 +2086,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -2208,6 +2387,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "apigateway-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -2241,6 +2429,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -2399,6 +2596,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -2487,6 +2687,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -2572,21 +2775,81 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appflow-fips.us-west-2.amazonaws.com",
+ },
},
},
"application-autoscaling": service{
@@ -2632,6 +2895,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -3085,6 +3351,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -3097,6 +3366,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -3275,6 +3550,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -3302,6 +3580,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -3414,6 +3695,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -3574,6 +3858,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "athena.ca-central-1.api.aws",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "athena.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -3833,15 +4126,75 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"autoscaling": service{
@@ -3887,6 +4240,21 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -3911,6 +4279,60 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "il-central-1",
}: endpoint{},
@@ -3926,15 +4348,39 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
+ },
},
},
"autoscaling-plans": service{
@@ -4050,6 +4496,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -4167,91 +4616,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "backupstorage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
"batch": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -4298,6 +4662,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -4408,6 +4775,286 @@ var awsPartition = partition{
},
},
},
+ "bedrock": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "bedrock-ap-northeast-1",
+ }: endpoint{
+ Hostname: "bedrock.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ap-south-1",
+ }: endpoint{
+ Hostname: "bedrock.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ap-southeast-1",
+ }: endpoint{
+ Hostname: "bedrock.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ap-southeast-2",
+ }: endpoint{
+ Hostname: "bedrock.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-central-1",
+ }: endpoint{
+ Hostname: "bedrock.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-west-1",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-west-2",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-eu-west-3",
+ }: endpoint{
+ Hostname: "bedrock.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-fips-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-fips-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-fips-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-northeast-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-south-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-southeast-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ap-southeast-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-central-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-eu-west-3",
+ }: endpoint{
+ Hostname: "bedrock-runtime.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-ca-central-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-sa-east-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock-runtime.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-sa-east-1",
+ }: endpoint{
+ Hostname: "bedrock.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-us-east-1",
+ }: endpoint{
+ Hostname: "bedrock.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-us-west-2",
+ }: endpoint{
+ Hostname: "bedrock.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"billingconductor": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -4424,6 +5071,9 @@ var awsPartition = partition{
},
"braket": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
@@ -4454,6 +5104,12 @@ var awsPartition = partition{
},
"cases": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -4668,66 +5324,262 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
- Region: "ap-southeast-2",
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
- Region: "eu-central-1",
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
- Region: "eu-north-1",
+ Region: "ca-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-south-1",
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
- Region: "eu-west-1",
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
}: endpoint{},
endpointKey{
- Region: "eu-west-2",
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
- Region: "eu-west-3",
+ Region: "eu-north-1",
}: endpoint{},
endpointKey{
- Region: "me-south-1",
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
- Region: "sa-east-1",
+ Region: "eu-south-1",
}: endpoint{},
endpointKey{
- Region: "us-east-1",
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
}: endpoint{},
endpointKey{
- Region: "us-east-2",
+ Region: "eu-west-1",
}: endpoint{},
endpointKey{
- Region: "us-west-1",
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloud9-fips.us-west-2.api.aws",
+ },
},
},
"cloudcontrolapi": service{
@@ -4735,69 +5587,216 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.af-south-1.api.aws",
+ },
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-east-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-northeast-1.api.aws",
+ },
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-northeast-2.api.aws",
+ },
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-northeast-3.api.aws",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-south-1.api.aws",
+ },
endpointKey{
Region: "ap-south-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-south-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-2.api.aws",
+ },
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-3.api.aws",
+ },
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ap-southeast-4.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ca-central-1.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-central-1.api.aws",
+ },
endpointKey{
Region: "eu-central-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-central-2.api.aws",
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-north-1.api.aws",
+ },
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-south-1.api.aws",
+ },
endpointKey{
Region: "eu-south-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-south-2.api.aws",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-west-2.api.aws",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.eu-west-3.api.aws",
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -4807,6 +5806,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -4846,51 +5854,123 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.me-central-1.api.aws",
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.me-south-1.api.aws",
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.sa-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-west-2.api.aws",
+ },
},
},
"clouddirectory": service{
@@ -4962,6 +6042,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -5121,6 +6204,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -5252,6 +6338,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -5412,6 +6501,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -5835,6 +6927,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -5993,15 +7088,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -6023,6 +7130,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6077,6 +7187,12 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -6279,6 +7395,12 @@ var awsPartition = partition{
},
"cognito-identity": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -6291,24 +7413,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6357,6 +7497,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -6403,6 +7546,12 @@ var awsPartition = partition{
},
"cognito-idp": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -6415,24 +7564,42 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -6481,6 +7648,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -6660,12 +7830,27 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -6772,6 +7957,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "compute-optimizer.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -6788,6 +7981,22 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -6804,6 +8013,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "compute-optimizer.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -6820,6 +8037,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "compute-optimizer.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -6844,6 +8069,22 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "compute-optimizer.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "compute-optimizer.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -6932,6 +8173,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -7212,6 +8456,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -7221,6 +8468,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -7239,15 +8489,39 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -7260,6 +8534,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -7340,6 +8617,18 @@ var awsPartition = partition{
},
},
},
+ "cost-optimization-hub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "cost-optimization-hub.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"cur": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -7876,6 +9165,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "datasync-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datasync-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -7909,6 +9207,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "datasync-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -7995,6 +9302,190 @@ var awsPartition = partition{
},
},
},
+ "datazone": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "datazone.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "datazone.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "datazone.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "datazone.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "datazone.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "datazone.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "datazone.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "datazone.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "datazone.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "datazone.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "datazone.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "datazone.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "datazone.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "datazone.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "datazone.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "datazone.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "datazone.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "datazone.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "datazone.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "datazone.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "datazone.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "datazone.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "datazone.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "datazone.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "datazone.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "datazone.us-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datazone-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
"dax": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -8012,6 +9503,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -8215,6 +9712,21 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -8239,6 +9751,24 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -8388,6 +9918,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -8412,6 +9945,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -8473,6 +10009,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "dms",
}: endpoint{
@@ -8788,110 +10327,10 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ds-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "ds-fips.us-east-1.amazonaws.com",
+ Hostname: "drs-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -8900,7 +10339,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "ds-fips.us-east-2.amazonaws.com",
+ Hostname: "drs-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -8909,7 +10348,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "ds-fips.us-west-1.amazonaws.com",
+ Hostname: "drs-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -8918,7 +10357,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "ds-fips.us-west-2.amazonaws.com",
+ Hostname: "drs-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
@@ -8943,7 +10382,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-east-1.amazonaws.com",
+ Hostname: "drs-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -8952,7 +10391,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-east-2.amazonaws.com",
+ Hostname: "drs-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -8961,7 +10400,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-west-1.amazonaws.com",
+ Hostname: "drs-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -8970,16 +10409,11 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "ds-fips.us-west-2.amazonaws.com",
+ Hostname: "drs-fips.us-west-2.amazonaws.com",
},
},
},
- "dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
+ "ds": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -9021,16 +10455,16 @@ var awsPartition = partition{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ Hostname: "ds-fips.ca-central-1.amazonaws.com",
},
endpointKey{
- Region: "ca-central-1-fips",
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ds-fips.ca-west-1.amazonaws.com",
},
endpointKey{
Region: "eu-central-1",
@@ -9057,17 +10491,62 @@ var awsPartition = partition{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
- Region: "il-central-1",
- }: endpoint{},
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "ds-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
- Region: "local",
+ Region: "fips-ca-west-1",
}: endpoint{
- Hostname: "localhost:8000",
- Protocols: []string{"http"},
+ Hostname: "ds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "ds-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "ds-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "ds-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "ds-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -9084,16 +10563,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ds-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -9102,16 +10572,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "ds-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -9120,16 +10581,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "ds-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -9138,20 +10590,206 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "ds-fips.us-west-2.amazonaws.com",
},
},
},
- "ebs": service{
+ "dynamodb": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "local",
+ }: endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "ebs": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -9195,6 +10833,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ebs-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ebs-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -9228,6 +10875,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ebs-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -9369,6 +11025,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ec2-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ec2-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -9408,6 +11073,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ec2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -9556,6 +11230,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -9737,6 +11414,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -9847,6 +11527,166 @@ var awsPartition = partition{
},
},
},
+ "eks-auth": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "eks-auth.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "eks-auth.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "eks-auth.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "eks-auth.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "eks-auth.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "eks-auth.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "eks-auth.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "eks-auth.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "eks-auth.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "eks-auth.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "eks-auth.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "eks-auth.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "eks-auth.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "eks-auth.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "eks-auth.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "eks-auth.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "eks-auth.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "eks-auth.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "eks-auth.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "eks-auth.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "eks-auth.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "eks-auth.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "eks-auth.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "eks-auth.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "eks-auth.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "eks-auth.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "eks-auth.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "eks-auth.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "eks-auth.us-west-2.api.aws",
+ },
+ },
+ },
"elasticache": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -9885,6 +11725,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -10247,6 +12090,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -10427,6 +12279,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@@ -10688,6 +12549,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -10848,6 +12712,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -10883,6 +12756,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -10946,6 +12828,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
@@ -11029,6 +12917,12 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -11047,6 +12941,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "email-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -11056,6 +12959,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "email-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "email-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@@ -11086,9 +13007,21 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -11114,6 +13047,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -11123,6 +13059,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -11135,12 +13074,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -11195,6 +13140,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -11241,6 +13189,9 @@ var awsPartition = partition{
},
"emr-serverless": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -11250,6 +13201,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -11259,6 +13213,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -11274,6 +13231,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -11328,6 +13291,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -11496,6 +13462,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "aos.ca-central-1.api.aws",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "aos.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -11749,6 +13724,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -11910,12 +13888,27 @@ var awsPartition = partition{
},
"finspace": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -11984,6 +13977,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -12185,6 +14181,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fms-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fms-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -12311,6 +14316,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "fms-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@@ -12696,6 +14710,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "fsx-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -12729,6 +14752,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-prod-ca-central-1",
}: endpoint{
@@ -12738,6 +14770,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-prod-ca-west-1",
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-prod-us-east-1",
}: endpoint{
@@ -12837,6 +14878,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "prod-ca-west-1",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "prod-ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "prod-us-east-1",
}: endpoint{
@@ -13017,16 +15076,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "gamesparks": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
"geo": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -13220,6 +15269,18 @@ var awsPartition = partition{
},
},
},
+ "globalaccelerator": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
"glue": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -13258,6 +15319,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -13687,6 +15751,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -13865,13 +15932,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "honeycode": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
"iam": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -13976,6 +16036,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -13985,9 +16048,15 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -14000,6 +16069,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -14012,6 +16084,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -14256,6 +16331,9 @@ var awsPartition = partition{
},
"inspector2": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@@ -14265,6 +16343,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -14274,12 +16355,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@@ -14454,6 +16541,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "internetmonitor.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -14996,16 +17088,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "iotroborunner": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
"iotsecuredtunneling": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -15275,12 +17357,45 @@ var awsPartition = partition{
},
"iottwinmaker": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "api-ap-northeast-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "api-ap-northeast-2",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "api-ap-south-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
endpointKey{
Region: "api-ap-southeast-1",
}: endpoint{
@@ -15329,6 +17444,30 @@ var awsPartition = partition{
Region: "us-west-2",
},
},
+ endpointKey{
+ Region: "data-ap-northeast-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "data-ap-northeast-2",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "data-ap-south-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
endpointKey{
Region: "data-ap-southeast-1",
}: endpoint{
@@ -15616,6 +17755,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "kafka-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kafka-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -15649,6 +17797,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "kafka-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -15804,12 +17961,27 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kendra-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "kendra-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -15945,6 +18117,11 @@ var awsPartition = partition{
}: endpoint{
Hostname: "kendra-ranking-fips.ca-central-1.api.aws",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "kendra-ranking.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-2",
}: endpoint{
@@ -16073,6 +18250,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -16221,6 +18401,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -16550,6 +18733,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kms-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "kms-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -16878,6 +19079,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -17098,6 +19302,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "lambda.ca-central-1.api.aws",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lambda.ca-west-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -17324,6 +19537,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -17333,18 +19549,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -17393,6 +19621,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -17475,6 +19706,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -17586,185 +19820,6 @@ var awsPartition = partition{
},
},
"license-manager-user-subscriptions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "lightsail": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "logs": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
@@ -17802,6 +19857,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -17826,6 +19884,359 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "lightsail": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "logs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "logs-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "logs-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "logs-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -17865,18 +20276,48 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.il-central-1.api.aws",
+ },
endpointKey{
Region: "me-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.me-central-1.api.aws",
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.me-south-1.api.aws",
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.sa-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
@@ -17886,6 +20327,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
@@ -17895,6 +20342,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
@@ -17904,6 +20357,12 @@ var awsPartition = partition{
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "logs.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
@@ -17983,12 +20442,18 @@ var awsPartition = partition{
},
"m2": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -18008,6 +20473,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -18047,6 +20521,9 @@ var awsPartition = partition{
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -18090,46 +20567,6 @@ var awsPartition = partition{
}: endpoint{},
},
},
- "macie": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "macie-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "macie-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie-fips.us-west-2.amazonaws.com",
- },
- },
- },
"macie2": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -18282,6 +20719,13 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "managedblockchain-query": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ },
+ },
"marketplacecommerceanalytics": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -18291,12 +20735,30 @@ var awsPartition = partition{
},
"media-pipelines-chime": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -18355,12 +20817,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18379,6 +20847,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -18491,6 +20962,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -18540,15 +21014,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -18591,6 +21077,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -18631,6 +21120,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -18640,6 +21132,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18683,6 +21178,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -18692,6 +21190,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18735,6 +21236,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@@ -18744,6 +21248,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -18812,12 +21319,48 @@ var awsPartition = partition{
},
"meetings-chime": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
endpointKey{
Region: "il-central-1",
}: endpoint{},
@@ -19083,6 +21626,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -19516,6 +22062,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -19664,6 +22213,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -19724,6 +22276,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -19977,227 +22532,236 @@ var awsPartition = partition{
Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
},
endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "networkmanager": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "networkmanager.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "fips-aws-global",
- }: endpoint{
- Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "nimble": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "oam": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "networkmanager": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "networkmanager.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "aws-global",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-aws-global",
+ }: endpoint{
+ Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
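Note: networkmanager above is a non-regionalized service (`IsRegionalized: boxedFalse` with `PartitionEndpoint: "aws-global"`), so every caller funnels to one endpoint and `CredentialScope` pins the signing region. A sketch only, with `SigningRegion` standing in for `CredentialScope.Region`:

    package main

    import "fmt"

    type endpoint struct {
        Hostname      string
        SigningRegion string
    }

    func resolveGlobal(eps map[string]endpoint, partitionEndpoint string) endpoint {
        // IsRegionalized == boxedFalse: the caller's configured region is
        // ignored for lookup; the partition endpoint key is always used.
        return eps[partitionEndpoint]
    }

    func main() {
        eps := map[string]endpoint{
            "aws-global": {
                Hostname:      "networkmanager.us-west-2.amazonaws.com",
                SigningRegion: "us-west-2",
            },
        }
        ep := resolveGlobal(eps, "aws-global")
        fmt.Printf("host=%s sign-as=%s\n", ep.Hostname, ep.SigningRegion)
    }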
+ "nimble": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
+ "oam": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
@@ -20299,6 +22863,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "oidc.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -20323,6 +22895,14 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "oidc.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -20331,6 +22911,14 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "oidc.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -20339,6 +22927,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "oidc.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -20355,6 +22951,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "oidc.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -20387,6 +22991,14 @@ var awsPartition = partition{
Region: "il-central-1",
},
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "oidc.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -20489,6 +23101,14 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "omics.il-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
endpointKey{
Region: "us-east-1",
}: endpoint{
@@ -20642,21 +23262,36 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -20927,88 +23562,490 @@ var awsPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.af-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-east-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-northeast-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-northeast-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-northeast-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-northeast-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-northeast-3",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-northeast-3.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-south-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-south-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-south-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-southeast-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-southeast-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-southeast-3",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-3.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ap-southeast-4",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ap-southeast-4.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "ca-central-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ca-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-central-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.ca-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.ca-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-central-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-central-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-central-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-north-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-north-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-south-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-south-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-south-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-west-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-west-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-west-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "eu-west-3",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.eu-west-3.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "pi-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "pi-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "pi-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "pi-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "pi-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "pi-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "il-central-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.il-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "me-central-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.me-central-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "me-south-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.me-south-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "sa-east-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.sa-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-east-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-east-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-east-2.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-east-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-east-2.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-west-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-west-2",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-west-2.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-west-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-west-2.api.aws",
+ Protocols: []string{"https"},
+ },
},
},
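Note: the new "pi" entries above are the first in this hunk to combine variants as `fipsVariant | dualStackVariant`, i.e. `Variant` is a bit set. A sketch with names mirroring this file (the zero-variant hostname is illustrative; in this table that entry is empty and falls back to partition defaults):

    package main

    import "fmt"

    type endpointKey struct {
        Region  string
        Variant int
    }

    const (
        fipsVariant      = 1 << iota // 1
        dualStackVariant             // 2
    )

    func main() {
        hosts := map[endpointKey]string{
            {"us-west-2", 0}:                              "pi.us-west-2.amazonaws.com",
            {"us-west-2", dualStackVariant}:               "pi.us-west-2.api.aws",
            {"us-west-2", fipsVariant}:                    "pi-fips.us-west-2.amazonaws.com",
            {"us-west-2", fipsVariant | dualStackVariant}: "pi-fips.us-west-2.api.aws",
        }
        // A client wanting FIPS and dual-stack together resolves the
        // combined key, matching the fipsVariant | dualStackVariant rows.
        fmt.Println(hosts[endpointKey{"us-west-2", fipsVariant | dualStackVariant}])
        // Output: pi-fips.us-west-2.api.aws
    }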
"pinpoint": service{
@@ -21170,6 +24207,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -21185,12 +24225,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -21397,6 +24443,14 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "portal.sso.ap-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -21421,6 +24475,14 @@ var awsPartition = partition{
Region: "ap-southeast-3",
},
},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{
@@ -21429,6 +24491,14 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "portal.sso.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -21437,6 +24507,14 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "portal.sso.eu-central-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -21453,6 +24531,14 @@ var awsPartition = partition{
Region: "eu-south-1",
},
},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "portal.sso.eu-south-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -21485,6 +24571,14 @@ var awsPartition = partition{
Region: "il-central-1",
},
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "portal.sso.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -21535,6 +24629,19 @@ var awsPartition = partition{
},
},
},
+ "private-networks": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"profile": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -21676,6 +24783,166 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "qbusiness": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.af-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-northeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{
+ Hostname: "qbusiness.ap-northeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{
+ Hostname: "qbusiness.ap-northeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "qbusiness.ap-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-1.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-2.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-3.api.aws",
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "qbusiness.ap-southeast-4.api.aws",
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.ca-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "qbusiness.eu-central-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-north-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "qbusiness.eu-south-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.eu-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{
+ Hostname: "qbusiness.eu-west-2.api.aws",
+ },
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{
+ Hostname: "qbusiness.eu-west-3.api.aws",
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.il-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "qbusiness.me-central-1.api.aws",
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "qbusiness.me-south-1.api.aws",
+ },
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.sa-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{
+ Hostname: "qbusiness.us-east-2.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "qbusiness.us-west-2.api.aws",
+ },
+ },
+ },
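Note: qbusiness above carries a `Defaults` block whose FIPS template is `"{service}-fips.{region}.{dnsSuffix}"` with `DNSSuffix: "api.aws"`. A sketch of how such a template can expand — `expand` is our helper for illustration, not an SDK export:

    package main

    import (
        "fmt"
        "strings"
    )

    func expand(tmpl, service, region, dnsSuffix string) string {
        return strings.NewReplacer(
            "{service}", service,
            "{region}", region,
            "{dnsSuffix}", dnsSuffix,
        ).Replace(tmpl)
    }

    func main() {
        // With the qbusiness FIPS default and DNSSuffix "api.aws":
        fmt.Println(expand("{service}-fips.{region}.{dnsSuffix}",
            "qbusiness", "us-east-1", "api.aws"))
        // Output: qbusiness-fips.us-east-1.api.aws
    }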
"qldb": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -21775,6 +25042,9 @@ var awsPartition = partition{
},
"quicksight": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -21790,15 +25060,27 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "api",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -21866,6 +25148,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ram-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ram-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -21899,6 +25190,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ram-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -22029,6 +25329,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "rbin-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rbin-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -22062,6 +25371,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "rbin-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -22201,6 +25519,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -22243,6 +25579,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "rds-fips.ca-west-1",
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "rds-fips.us-east-1",
}: endpoint{
@@ -22297,6 +25642,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "rds.ca-west-1",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "rds.ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "rds.us-east-1",
}: endpoint{
@@ -22599,6 +25962,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "redshift-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -22632,6 +26004,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "redshift-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -22738,12 +26119,24 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -22753,18 +26146,93 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
+ },
},
},
"rekognition": service{
@@ -23088,123 +26556,94 @@ var awsPartition = partition{
},
},
"resource-explorer-2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-northeast-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-northeast-2.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-northeast-3.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-south-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ap-south-2",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-south-2.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-southeast-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-southeast-2.api.aws",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "resource-explorer-2.ap-southeast-4.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
- }: endpoint{
- Hostname: "resource-explorer-2.ca-central-1.api.aws",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
- }: endpoint{
- Hostname: "resource-explorer-2.eu-central-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "eu-central-2",
- }: endpoint{
- Hostname: "resource-explorer-2.eu-central-2.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
- }: endpoint{
- Hostname: "resource-explorer-2.eu-north-1.api.aws",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
- }: endpoint{
- Hostname: "resource-explorer-2.eu-west-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
- }: endpoint{
- Hostname: "resource-explorer-2.eu-west-2.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "eu-west-3",
- }: endpoint{
- Hostname: "resource-explorer-2.eu-west-3.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "il-central-1",
- }: endpoint{
- Hostname: "resource-explorer-2.il-central-1.api.aws",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
- }: endpoint{
- Hostname: "resource-explorer-2.sa-east-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
- }: endpoint{
- Hostname: "resource-explorer-2.us-east-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "us-east-2",
- }: endpoint{
- Hostname: "resource-explorer-2.us-east-2.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "us-west-1",
- }: endpoint{
- Hostname: "resource-explorer-2.us-west-1.api.aws",
- },
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
- }: endpoint{
- Hostname: "resource-explorer-2.us-west-2.api.aws",
- },
+ }: endpoint{},
},
},
"resource-groups": service{
@@ -23245,6 +26684,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -23400,6 +26842,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -23409,18 +26854,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -23430,6 +26887,48 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -23439,15 +26938,39 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
+ },
},
},
"route53": service{
@@ -23544,6 +27067,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -23596,33 +27122,81 @@ var awsPartition = partition{
},
"rum": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -23784,6 +27358,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -24061,6 +27638,27 @@ var awsPartition = partition{
}: endpoint{
Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -24146,6 +27744,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "s3-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -24359,6 +27966,44 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{
+ Hostname: "s3-control.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.af-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{
+ Hostname: "s3-control.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
endpointKey{
Region: "ap-northeast-1",
}: endpoint{
@@ -24435,6 +28080,25 @@ var awsPartition = partition{
Region: "ap-south-1",
},
},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{
+ Hostname: "s3-control.ap-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{
@@ -24474,51 +28138,138 @@ var awsPartition = partition{
},
},
endpointKey{
- Region: "ca-central-1",
+ Region: "ap-southeast-3",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{
+ Hostname: "s3-control.ap-southeast-4.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{
+ Hostname: "s3-control.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "ca-west-1",
}: endpoint{
- Hostname: "s3-control.ca-central-1.amazonaws.com",
+ Hostname: "s3-control.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "ca-west-1",
},
},
endpointKey{
- Region: "ca-central-1",
+ Region: "ca-west-1",
Variant: dualStackVariant,
}: endpoint{
- Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com",
+ Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "ca-west-1",
},
},
endpointKey{
- Region: "ca-central-1",
+ Region: "ca-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+ Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "ca-west-1",
},
},
endpointKey{
- Region: "ca-central-1",
+ Region: "ca-west-1",
Variant: fipsVariant | dualStackVariant,
}: endpoint{
- Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com",
+ Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "ca-west-1",
},
},
endpointKey{
- Region: "ca-central-1-fips",
+ Region: "ca-west-1-fips",
}: endpoint{
- Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+ Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
SignatureVersions: []string{"s3v4"},
CredentialScope: credentialScope{
- Region: "ca-central-1",
+ Region: "ca-west-1",
},
Deprecated: boxedTrue,
},
@@ -24541,6 +28292,25 @@ var awsPartition = partition{
Region: "eu-central-1",
},
},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-central-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpointKey{
Region: "eu-north-1",
}: endpoint{
@@ -24560,6 +28330,44 @@ var awsPartition = partition{
Region: "eu-north-1",
},
},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{
+ Hostname: "s3-control.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{
+ Hostname: "s3-control.eu-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{
@@ -24617,6 +28425,63 @@ var awsPartition = partition{
Region: "eu-west-3",
},
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{
+ Hostname: "s3-control.il-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.il-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "s3-control.me-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.me-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{
+ Hostname: "s3-control.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.me-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
endpointKey{
Region: "sa-east-1",
}: endpoint{
@@ -24839,55 +28704,123 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -24921,40 +28854,84 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
},
},
"sagemaker-geospatial": service{
@@ -25183,54 +29160,333 @@ var awsPartition = partition{
endpointKey{
Region: "af-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "af-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
+
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "ca-central-1-fips",
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "il-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+
Deprecated: boxedTrue,
},
+ },
+ },
+ "securityhub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -25255,6 +29511,42 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "il-central-1",
}: endpoint{},
@@ -25274,16 +29566,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "securityhub-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -25292,16 +29575,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "securityhub-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -25310,16 +29584,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
+ Hostname: "securityhub-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -25328,27 +29593,12 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
+ Hostname: "securityhub-fips.us-west-2.amazonaws.com",
},
},
},
- "securityhub": service{
+ "securitylake": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
@@ -25361,39 +29611,21 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -25406,7 +29638,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
- Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+ Hostname: "securitylake-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
@@ -25415,7 +29647,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-east-2",
}: endpoint{
- Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+ Hostname: "securitylake-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
@@ -25424,7 +29656,7 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-1",
}: endpoint{
- Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+ Hostname: "securitylake-fips.us-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-1",
},
@@ -25433,21 +29665,12 @@ var awsPartition = partition{
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
- Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+ Hostname: "securitylake-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
Deprecated: boxedTrue,
},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -25458,7 +29681,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+ Hostname: "securitylake-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -25467,7 +29690,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+ Hostname: "securitylake-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -25476,7 +29699,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+ Hostname: "securitylake-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -25485,53 +29708,10 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+ Hostname: "securitylake-fips.us-west-2.amazonaws.com",
},
},
},
- "securitylake": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
"serverlessrepo": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -25614,21 +29794,85 @@ var awsPartition = partition{
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"servicecatalog": service{
@@ -25823,6 +30067,9 @@ var awsPartition = partition{
}: endpoint{
Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -25892,6 +30139,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -26070,6 +30320,36 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-west-1.api.aws",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -26343,6 +30623,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -26593,6 +30876,38 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-verification-us-east-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-east-2",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-west-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-west-2",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -26635,6 +30950,166 @@ var awsPartition = partition{
}: endpoint{
Hostname: "signer-fips.us-west-2.amazonaws.com",
},
+ endpointKey{
+ Region: "verification-af-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.af-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "af-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-northeast-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-northeast-2",
+ }: endpoint{
+ Hostname: "verification.signer.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-southeast-1",
+ }: endpoint{
+ Hostname: "verification.signer.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-ap-southeast-2",
+ }: endpoint{
+ Hostname: "verification.signer.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-ca-central-1",
+ }: endpoint{
+ Hostname: "verification.signer.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-central-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-north-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-west-1",
+ }: endpoint{
+ Hostname: "verification.signer.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-west-2",
+ }: endpoint{
+ Hostname: "verification.signer.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-eu-west-3",
+ }: endpoint{
+ Hostname: "verification.signer.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ endpointKey{
+ Region: "verification-me-south-1",
+ }: endpoint{
+ Hostname: "verification.signer.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-sa-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-east-2",
+ }: endpoint{
+ Hostname: "verification.signer.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-west-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-west-2",
+ }: endpoint{
+ Hostname: "verification.signer.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"simspaceweaver": service{
@@ -26689,18 +31164,36 @@ var awsPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -26710,15 +31203,33 @@ var awsPartition = partition{
}: endpoint{
Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -26737,6 +31248,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-west-2",
}: endpoint{
@@ -26746,6 +31275,18 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -26755,6 +31296,24 @@ var awsPartition = partition{
}: endpoint{
Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -27026,6 +31585,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-central-1",
}: endpoint{},
@@ -27119,6 +31681,15 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sns-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -27143,6 +31714,15 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "sns-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -27273,6 +31853,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -27430,6 +32013,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "ssm-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "ssm-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -27463,6 +32055,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "ssm-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -27953,6 +32554,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -27962,18 +32566,30 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@@ -27986,6 +32602,9 @@ var awsPartition = partition{
endpointKey{
Region: "il-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -28044,6 +32663,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28207,6 +32829,24 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "storagegateway-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28363,6 +33003,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28469,6 +33112,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28643,6 +33289,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28791,6 +33440,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28939,6 +33591,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -28989,41 +33644,115 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "tax": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "tax.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"textract": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-northeast-2.api.aws",
+ },
endpointKey{
Region: "ap-south-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-south-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-southeast-1.api.aws",
+ },
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ap-southeast-2.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.ca-central-1.api.aws",
+ },
endpointKey{
Region: "ca-central-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.ca-central-1.api.aws",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-central-1.api.aws",
+ },
endpointKey{
Region: "eu-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-west-1.api.aws",
+ },
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-west-2.api.aws",
+ },
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.eu-west-3.api.aws",
+ },
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@@ -29072,52 +33801,140 @@ var awsPartition = partition{
endpointKey{
Region: "us-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-east-1.api.aws",
+ },
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-east-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-east-2.api.aws",
+ },
endpointKey{
Region: "us-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-west-1.api.aws",
+ },
endpointKey{
Region: "us-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-west-2.api.aws",
+ },
endpointKey{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-west-2.amazonaws.com",
},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-west-2.api.aws",
+ },
+ },
+ },
+ "thinclient": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
},
},
"tnb": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@@ -29471,6 +34288,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "transfer-fips.ca-central-1.amazonaws.com",
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.ca-west-1.amazonaws.com",
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -29504,6 +34330,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "transfer-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-east-1",
}: endpoint{
@@ -29559,7 +34394,305 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "transfer-fips.us-east-1.amazonaws.com",
+ Hostname: "transfer-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "transfer-fips.us-west-2.amazonaws.com",
+ },
+ },
+ },
+ "translate": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-1-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-east-2-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "translate-fips.us-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-1-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-west-2-fips",
+ }: endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "verifiedpermissions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-4",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "fips-ca-central-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-east-2",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-west-2",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com",
},
endpointKey{
Region: "us-east-2",
@@ -29568,7 +34701,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "transfer-fips.us-east-2.amazonaws.com",
+ Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com",
},
endpointKey{
Region: "us-west-1",
@@ -29577,7 +34710,7 @@ var awsPartition = partition{
Region: "us-west-1",
Variant: fipsVariant,
}: endpoint{
- Hostname: "transfer-fips.us-west-1.amazonaws.com",
+ Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com",
},
endpointKey{
Region: "us-west-2",
@@ -29586,208 +34719,24 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: fipsVariant,
}: endpoint{
- Hostname: "transfer-fips.us-west-2.amazonaws.com",
+ Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com",
},
},
},
- "translate": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "translate-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "translate-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "translate-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "verifiedpermissions": service{
+ "voice-chime": service{
Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "voice-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@@ -29931,6 +34880,12 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@@ -29952,12 +34907,21 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
@@ -30960,6 +35924,23 @@ var awsPartition = partition{
Region: "ca-central-1",
},
},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{
+ Hostname: "wafv2.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
+ endpointKey{
+ Region: "ca-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpointKey{
Region: "eu-central-1",
}: endpoint{
@@ -31204,6 +36185,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-ca-west-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-eu-central-1",
}: endpoint{
@@ -31549,9 +36539,18 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -31573,9 +36572,18 @@ var awsPartition = partition{
endpointKey{
Region: "ui-ap-northeast-1",
}: endpoint{},
+ endpointKey{
+ Region: "ui-ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ui-ap-southeast-1",
+ }: endpoint{},
endpointKey{
Region: "ui-ap-southeast-2",
}: endpoint{},
+ endpointKey{
+ Region: "ui-ca-central-1",
+ }: endpoint{},
endpointKey{
Region: "ui-eu-central-1",
}: endpoint{},
@@ -31724,6 +36732,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "il-central-1",
+ }: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@@ -31819,6 +36830,9 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
+ endpointKey{
+ Region: "ca-west-1",
+ }: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -32021,6 +37035,21 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "acm-pca": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"airflow": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32051,6 +37080,20 @@ var awscnPartition = partition{
},
},
},
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32230,16 +37273,6 @@ var awscnPartition = partition{
}: endpoint{},
},
},
- "backupstorage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
"batch": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32293,9 +37326,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"cloudformation": service{
@@ -32468,6 +37513,31 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "datazone": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"dax": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32595,6 +37665,31 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "eks-auth": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
"elasticache": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32680,9 +37775,21 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"emr-containers": service{
@@ -32705,6 +37812,19 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "entitlement.marketplace": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"es": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -32892,6 +38012,16 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "inspector2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"internetmonitor": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -32970,6 +38100,29 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "iottwinmaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "api-cn-north-1",
+ }: endpoint{
+ Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "data-cn-north-1",
+ }: endpoint{
+ Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
"kafka": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33109,7 +38262,7 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-northwest-1",
}: endpoint{
- Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+ Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn",
CredentialScope: credentialScope{
Region: "cn-northwest-1",
},
@@ -33181,6 +38334,16 @@ var awscnPartition = partition{
},
},
},
+ "network-firewall": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"oam": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33191,6 +38354,26 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "oidc": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "oidc.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
"organizations": service{
PartitionEndpoint: "aws-cn-global",
IsRegionalized: boxedFalse,
@@ -33213,6 +38396,34 @@ var awscnPartition = partition{
},
},
"pi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn",
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "pipes": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
@@ -33229,6 +38440,58 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "portal.sso": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "qbusiness": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.amazonwebservices.com.cn",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{
+ Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn",
+ },
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{
+ Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
+ },
+ },
+ "quicksight": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ },
+ },
"ram": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -33269,29 +38532,14 @@ var awscnPartition = partition{
}: endpoint{},
},
},
- "resource-explorer-2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- },
+ "redshift-serverless": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
- }: endpoint{
- Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn",
- },
+ }: endpoint{},
endpointKey{
Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn",
- },
+ }: endpoint{},
},
},
"resource-groups": service{
@@ -33481,9 +38729,17 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
},
},
"securityhub": service{
@@ -33570,6 +38826,22 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "verification-cn-north-1",
+ }: endpoint{
+ Hostname: "verification.signer.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-cn-northwest-1",
+ }: endpoint{
+ Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
},
},
"sms": service{
@@ -33660,14 +38932,36 @@ var awscnPartition = partition{
}: endpoint{},
},
},
+ "sso": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "cn-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ }: endpoint{},
+ },
+ },
"states": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-north-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "states.cn-north-1.api.amazonwebservices.com.cn",
+ },
endpointKey{
Region: "cn-northwest-1",
}: endpoint{},
+ endpointKey{
+ Region: "cn-northwest-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn",
+ },
},
},
"storagegateway": service{
@@ -34441,12 +39735,42 @@ var awsusgovPartition = partition{
},
"appconfigdata": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
+ },
},
},
"application-autoscaling": service{
@@ -34581,6 +39905,16 @@ var awsusgovPartition = partition{
},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"athena": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -34676,13 +40010,37 @@ var awsusgovPartition = partition{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
}: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
Protocols: []string{"http", "https"},
+
+ Deprecated: boxedTrue,
},
},
},
@@ -34706,16 +40064,6 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
- "backupstorage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
"batch": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -34764,6 +40112,45 @@ var awsusgovPartition = partition{
},
},
},
+ "bedrock": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "bedrock-fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-runtime-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "bedrock-us-gov-west-1",
+ }: endpoint{
+ Hostname: "bedrock.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"cassandra": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -34843,21 +40230,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws",
+ },
},
},
"clouddirectory": service{
@@ -35179,6 +40590,13 @@ var awsusgovPartition = partition{
},
},
},
+ "codestar-connections": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ },
+ },
"cognito-identity": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -35367,9 +40785,39 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"data-ats.iot": service{
@@ -35522,23 +40970,68 @@ var awsusgovPartition = partition{
},
},
},
- "directconnect": service{
+ "datazone": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{
- Hostname: "directconnect.us-gov-east-1.amazonaws.com",
+ Hostname: "datazone.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "datazone.us-gov-west-1.api.aws",
+ },
+ },
+ },
+ "directconnect": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
+ Deprecated: boxedTrue,
},
endpointKey{
- Region: "us-gov-west-1",
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "directconnect.us-gov-west-1.amazonaws.com",
+ Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com",
},
},
},
@@ -35669,6 +41162,46 @@ var awsusgovPartition = partition{
},
},
},
+ "drs": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"ds": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -35904,6 +41437,31 @@ var awsusgovPartition = partition{
},
},
},
+ "eks-auth": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "eks-auth.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "eks-auth.us-gov-west-1.api.aws",
+ },
+ },
+ },
"elasticache": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@@ -36124,6 +41682,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@@ -36135,6 +41699,13 @@ var awsusgovPartition = partition{
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-gov-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
@@ -36146,6 +41717,15 @@ var awsusgovPartition = partition{
},
"email": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "email-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -36155,6 +41735,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "email-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -36168,12 +41757,82 @@ var awsusgovPartition = partition{
},
"emr-containers": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
+ "emr-serverless": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
+ },
},
},
"es": service{
@@ -36544,21 +42203,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "glue.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "glue-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "glue-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "glue.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "glue-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "glue-fips.us-gov-west-1.api.aws",
+ },
},
},
"greengrass": service{
@@ -36676,7 +42359,21 @@ var awsusgovPartition = partition{
},
},
"health": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ SSLCommonName: "health.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ },
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-us-gov-global",
+ }: endpoint{
+ Hostname: "global.health.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -37321,6 +43018,62 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "kinesisvideo": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"kms": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -37393,21 +43146,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "lakeformation-fips.us-gov-west-1.api.aws",
+ },
},
},
"lambda": service{
@@ -37512,6 +43289,16 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "license-manager-user-subscriptions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"logs": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -37552,6 +43339,36 @@ var awsusgovPartition = partition{
},
},
},
+ "m2": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{},
+ },
+ },
"managedblockchain": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -37688,6 +43505,13 @@ var awsusgovPartition = partition{
},
},
},
+ "models-v2-lex": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"models.lex": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -38021,12 +43845,76 @@ var awsusgovPartition = partition{
},
"pi": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-gov-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-east-1.api.aws",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-gov-west-1",
- }: endpoint{},
+ }: endpoint{
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "pi.us-gov-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "pi-fips.us-gov-west-1.api.aws",
+ Protocols: []string{"https"},
+ },
},
},
"pinpoint": service{
@@ -38108,6 +43996,31 @@ var awsusgovPartition = partition{
},
},
},
+ "qbusiness": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ DNSSuffix: "api.aws",
+ },
+ defaultKey{
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "{service}-fips.{region}.{dnsSuffix}",
+ DNSSuffix: "api.aws",
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-gov-east-1.api.aws",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{
+ Hostname: "qbusiness.us-gov-west-1.api.aws",
+ },
+ },
+ },
"quicksight": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -38349,28 +44262,43 @@ var awsusgovPartition = partition{
},
},
},
- "resource-explorer-2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
+ "resiliencehub": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
},
- defaultKey{
- Variant: fipsVariant,
+ endpointKey{
+ Region: "fips-us-gov-west-1",
}: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
+ Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
},
- },
- Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "resource-explorer-2.us-gov-east-1.api.aws",
+ Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
}: endpoint{
- Hostname: "resource-explorer-2.us-gov-west-1.api.aws",
+ Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com",
},
},
},
@@ -38429,6 +44357,46 @@ var awsusgovPartition = partition{
}: endpoint{},
},
},
+ "rolesanywhere": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"route53": service{
PartitionEndpoint: "aws-us-gov-global",
IsRegionalized: boxedFalse,
@@ -38497,6 +44465,13 @@ var awsusgovPartition = partition{
},
},
},
+ "runtime-v2-lex": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ },
+ },
"runtime.lex": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -38789,17 +44764,33 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
},
},
"secretsmanager": service{
@@ -38807,37 +44798,43 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
+
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
- }: endpoint{
- Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
- },
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
- Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
+
Deprecated: boxedTrue,
},
},
@@ -38882,6 +44879,46 @@ var awsusgovPartition = partition{
},
},
},
+ "securitylake": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "securitylake.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "securitylake.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "securitylake.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
"serverlessrepo": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -39133,14 +45170,116 @@ var awsusgovPartition = partition{
},
},
},
+ "signer": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-verification-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "fips-verification-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "verification-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "verification-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verification.signer.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
"simspaceweaver": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
+ },
},
},
"sms": service{
@@ -39167,6 +45306,15 @@ var awsusgovPartition = partition{
},
"sms-voice": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-us-gov-west-1",
}: endpoint{
@@ -39176,6 +45324,15 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
@@ -39358,6 +45515,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1",
},
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sso.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoint{
+ Hostname: "sso.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{
@@ -39366,6 +45541,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1",
},
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "sso.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoint{
+ Hostname: "sso.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"states": service{
@@ -39729,21 +45922,45 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-gov-east-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "textract.us-gov-west-1.api.aws",
+ },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "textract-fips.us-gov-west-1.api.aws",
+ },
},
},
"transcribe": service{
@@ -39874,6 +46091,46 @@ var awsusgovPartition = partition{
},
},
},
+ "verifiedpermissions": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-gov-east-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-gov-west-1",
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-gov-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpointKey{
+ Region: "us-gov-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-gov-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
+ },
+ },
+ },
"waf-regional": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -40138,6 +46395,20 @@ var awsisoPartition = partition{
},
},
},
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"api.sagemaker": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -40150,6 +46421,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"appconfig": service{
@@ -40187,6 +46461,16 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ },
+ },
"athena": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -40275,6 +46559,46 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "datasync": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
+ },
+ },
+ },
"directconnect": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -40397,6 +46721,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"ec2": service{
@@ -40498,14 +46825,45 @@ var awsisoPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Protocols: []string{"https"},
},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"https"},
+ },
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
+ },
},
},
"es": service{
@@ -40538,6 +46896,55 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "fsx": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-prod-us-iso-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "prod-us-iso-east-1",
+ }: endpoint{
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "prod-us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
+ },
+ },
+ },
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -40557,6 +46964,19 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"health": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -40757,22 +47177,80 @@ var awsisoPartition = partition{
},
"rds": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "rds.us-iso-east-1",
+ }: endpoint{
+ Hostname: "rds.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "rds.us-iso-west-1",
+ }: endpoint{
+ Hostname: "rds.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.us-iso-east-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-east-1-fips",
+ }: endpoint{
+ Hostname: "rds.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.us-iso-west-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1-fips",
+ }: endpoint{
+ Hostname: "rds.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"redshift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-iso-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "redshift.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
endpointKey{
Region: "us-iso-west-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "redshift.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
},
},
"resource-groups": service{
@@ -40823,15 +47301,186 @@ var awsisoPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "fips-us-iso-west-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-iso-east-1",
}: endpoint{
Protocols: []string{"http", "https"},
SignatureVersions: []string{"s3v4"},
},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
endpointKey{
Region: "us-iso-west-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{
+ Hostname: "s3-control.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ },
+ endpointKey{
+ Region: "us-iso-west-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-iso-west-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "s3-outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-iso-east-1",
+			}: endpoint{
+				Deprecated: boxedTrue,
+			},
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-iso-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
},
},
"secretsmanager": service{
@@ -40849,6 +47498,9 @@ var awsisoPartition = partition{
endpointKey{
Region: "us-iso-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-iso-west-1",
+ }: endpoint{},
},
},
"sns": service{
@@ -40965,6 +47617,13 @@ var awsisoPartition = partition{
}: endpoint{},
},
},
+ "textract": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-iso-east-1",
+ }: endpoint{},
+ },
+ },
"transcribe": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -41057,6 +47716,34 @@ var awsisobPartition = partition{
},
},
},
+ "api.pricing": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "apigateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"appconfig": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -41083,6 +47770,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "arc-zonal-shift": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"autoscaling": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -41095,6 +47789,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "cloudcontrolapi": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"cloudformation": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -41291,9 +47992,24 @@ var awsisobPartition = partition{
},
"elasticmapreduce": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
+ },
},
},
"es": service{
@@ -41310,6 +48026,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "firehose": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"glacier": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -41397,6 +48120,20 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "medialive": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
+ "mediapackage": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"metering.marketplace": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -41463,16 +48200,45 @@ var awsisobPartition = partition{
},
"rds": service{
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "rds.us-isob-east-1",
+ }: endpoint{
+ Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
},
},
"redshift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-isob-east-1",
- }: endpoint{},
+ }: endpoint{
+ Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
},
},
"resource-groups": service{
@@ -41503,6 +48269,13 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "runtime.sagemaker": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ },
+ },
"s3": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -41511,9 +48284,106 @@ var awsisobPartition = partition{
},
},
Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+ }: endpoint{
+ Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpointDefaults{
+ defaultKey{}: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{
+ Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant | dualStackVariant,
+ }: endpoint{
+ Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
+ "s3-outposts": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips-us-isob-east-1",
+			}: endpoint{
+				Deprecated: boxedTrue,
+			},
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{},
},
},
"secretsmanager": service{
@@ -41569,6 +48439,37 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
+ "storagegateway": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ endpointKey{
+ Region: "us-isob-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-isob-east-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
+ },
+ endpointKey{
+ Region: "us-isob-east-1-fips",
+ }: endpoint{
+ Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ Deprecated: boxedTrue,
+ },
+ },
+ },
"streams.dynamodb": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -41665,7 +48566,11 @@ var awsisoePartition = partition{
SignatureVersions: []string{"v4"},
},
},
- Regions: regions{},
+ Regions: regions{
+ "eu-isoe-west-1": region{
+ Description: "EU ISOE West",
+ },
+ },
Services: services{},
}
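
The regenerated tables above encode FIPS and dual-stack hostnames as variant entries keyed on the same region. As a quick orientation, here is a minimal sketch of resolving one of the new rows through the SDK's public endpoints API (the region/service pair is taken from the textract entries above; expected output is an assumption based on that table):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve the textract fipsVariant entry added above for us-gov-west-1.
	// Setting UseFIPSEndpoint selects the Variant: fipsVariant row.
	resolved, err := endpoints.DefaultResolver().EndpointFor(
		"textract", "us-gov-west-1",
		func(o *endpoints.Options) {
			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.URL) // expected: https://textract-fips.us-gov-west-1.amazonaws.com
}
```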
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
index 4601f883c..992ed0464 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro
s := a.Expected.(int)
result = s == req.HTTPResponse.StatusCode
case ErrorWaiterMatch:
- if aerr, ok := err.(awserr.Error); ok {
- result = aerr.Code() == a.Expected.(string)
+ switch ex := a.Expected.(type) {
+ case string:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == ex
+ }
+ case bool:
+ if ex {
+ result = err != nil
+ } else {
+ result = err == nil
+ }
}
default:
waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
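
The change above extends ErrorWaiterMatch so that Expected may be a bool: true matches any error, false matches the absence of one, and the existing string form still matches a specific awserr code. A sketch of the new form, assuming newRequestFn is an existing constructor with the signature func([]request.Option) (*request.Request, error); the waiter name and delay are illustrative, and imports of "time", "github.com/aws/aws-sdk-go/aws", and "github.com/aws/aws-sdk-go/aws/request" are assumed:

```go
w := request.Waiter{
	Name:        "UntilRequestErrors", // hypothetical waiter name
	MaxAttempts: 10,
	Delay:       request.ConstantWaiterDelay(5 * time.Second),
	Acceptors: []request.WaiterAcceptor{{
		State:    request.SuccessWaiterState,
		Matcher:  request.ErrorWaiterMatch,
		Expected: true, // new bool form: match whenever the request returns an error
	}},
	NewRequest: newRequestFn, // assumed constructor, see above
}
err := w.WaitWithContext(aws.BackgroundContext())
```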
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index d6fa24776..93bb5de64 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -171,6 +171,12 @@ type envConfig struct {
// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+ // Specifies that IMDS clients should not fallback to IMDSv1 if token
+ // requests fail.
+ //
+ // AWS_EC2_METADATA_V1_DISABLED=true
+ EC2IMDSv1Disabled *bool
+
// Specifies that SDK clients must resolve a dual-stack endpoint for
// services.
//
@@ -251,6 +257,9 @@ var (
ec2IMDSEndpointModeEnvKey = []string{
"AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
}
+ ec2MetadataV1DisabledEnvKey = []string{
+ "AWS_EC2_METADATA_V1_DISABLED",
+ }
useCABundleKey = []string{
"AWS_CA_BUNDLE",
}
@@ -393,6 +402,7 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil {
return envConfig{}, err
}
+ setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey)
if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil {
return cfg, err
@@ -414,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) {
}
}
+func setBoolPtrFromEnvVal(dst **bool, keys []string) {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch {
+ case strings.EqualFold(value, "false"):
+ *dst = new(bool)
+ **dst = false
+ case strings.EqualFold(value, "true"):
+ *dst = new(bool)
+ **dst = true
+ }
+ }
+}
+
func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)
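
Note that setBoolPtrFromEnvVal only recognizes the literals "true" and "false" (compared case-insensitively via strings.EqualFold); anything else leaves the pointer nil, so the setting falls through to the shared config file. In sketch form:

```go
// Resulting envConfig.EC2IMDSv1Disabled for a few inputs (illustrative):
//
//   AWS_EC2_METADATA_V1_DISABLED=true   -> pointer to true
//   AWS_EC2_METADATA_V1_DISABLED=FALSE  -> pointer to false (EqualFold)
//   AWS_EC2_METADATA_V1_DISABLED=1      -> nil (unrecognized, ignored)
```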
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 8127c99a9..3c88dee52 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -779,6 +779,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
}
+ cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback
+ if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil {
+ cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled)
+ }
+ if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil {
+ cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled)
+ }
+
cfg.S3UseARNRegion = userCfg.S3UseARNRegion
if cfg.S3UseARNRegion == nil {
cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
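
The merge above establishes a precedence chain for the new setting: an explicit EC2MetadataEnableFallback on the client config wins, then the environment, then the shared config file, with each IMDSv1-disabled flag inverted into the fallback flag. A minimal sketch of pinning it in code (imports of "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/aws/session" assumed):

```go
sess := session.Must(session.NewSession(&aws.Config{
	// Explicit user config: overrides AWS_EC2_METADATA_V1_DISABLED and
	// ec2_metadata_v1_disabled in the shared config file.
	EC2MetadataEnableFallback: aws.Bool(false), // never fall back to IMDSv1
}))
_ = sess
```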
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index ea3ac0d03..2945185b0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -80,6 +80,9 @@ const (
// EC2 IMDS Endpoint
ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+	// EC2 IMDSv1 disable fallback
+ ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
// Use DualStack Endpoint Resolution
useDualStackEndpoint = "use_dualstack_endpoint"
@@ -179,6 +182,12 @@ type sharedConfig struct {
// ec2_metadata_service_endpoint=http://fd00:ec2::254
EC2IMDSEndpoint string
+ // Specifies that IMDS clients should not fallback to IMDSv1 if token
+ // requests fail.
+ //
+ // ec2_metadata_v1_disabled=true
+ EC2IMDSv1Disabled *bool
+
// Specifies that SDK clients must resolve a dual-stack endpoint for
// services.
//
@@ -340,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s
if cfg.hasSSOTokenProviderConfiguration() {
skippedFiles = 0
for _, f := range files {
- section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)))
+ section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))
if ok {
var ssoSession ssoSession
ssoSession.setFromIniSection(section)
@@ -389,8 +398,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.Region, section, regionKey)
updateString(&cfg.CustomCABundle, section, customCABundleKey)
+ // we're retaining a behavioral quirk with this field that existed before
+	// the removal of literal parsing (aws-sdk-go-v2#2276):
+ // - if the key is missing, the config field will not be set
+ // - if the key is set to a non-numeric, the config field will be set to 0
if section.Has(roleDurationSecondsKey) {
- d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
+ var d time.Duration
+ if v, ok := section.Int(roleDurationSecondsKey); ok {
+ d = time.Duration(v) * time.Second
+ }
cfg.AssumeRoleDuration = &d
}
@@ -427,6 +443,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
ec2MetadataServiceEndpointModeKey, file.Filename, err)
}
updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+ updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint)
@@ -668,7 +685,10 @@ func updateBool(dst *bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
- *dst = section.Bool(key)
+
+	// retains pre-(aws-sdk-go-v2#2276) behavior where a non-bool value resolves to false
+ v, _ := section.Bool(key)
+ *dst = v
}
// updateBoolPtr will only update the dst with the value in the section key,
@@ -677,8 +697,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
+
+	// retains pre-(aws-sdk-go-v2#2276) behavior where a non-bool value resolves to false
+ v, _ := section.Bool(key)
*dst = new(bool)
- **dst = section.Bool(key)
+ **dst = v
}
// SharedConfigLoadError is an error for the shared config file failed to load.
@@ -805,7 +828,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i
return
}
- if section.Bool(key) {
+	// retains pre-(aws-sdk-go-v2#2276) behavior where a non-bool value resolves to false
+ if v, _ := section.Bool(key); v {
*dst = endpoints.DualStackEndpointStateEnabled
} else {
*dst = endpoints.DualStackEndpointStateDisabled
@@ -821,7 +845,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section
return
}
- if section.Bool(key) {
+	// retains pre-(aws-sdk-go-v2#2276) behavior where a non-bool value resolves to false
+ if v, _ := section.Bool(key); v {
*dst = endpoints.FIPSEndpointStateEnabled
} else {
*dst = endpoints.FIPSEndpointStateDisabled
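
Tying the shared-config additions together: ec2_metadata_v1_disabled is read through updateBoolPtr, so "true"/"false" set the pointer and, per the retained quirk above, any other literal now resolves to false instead of failing. A sketch with a hypothetical profile (the profile name and contents are illustrative):

```go
// Hypothetical ~/.aws/config:
//
//   [profile imds-hardened]
//   region = us-gov-west-1
//   ec2_metadata_v1_disabled = true
//
// Loading it sets sharedConfig.EC2IMDSv1Disabled, which mergeConfigSrcs
// inverts into cfg.EC2MetadataEnableFallback = aws.Bool(false).
sess := session.Must(session.NewSessionWithOptions(session.Options{
	Profile:           "imds-hardened",
	SharedConfigState: session.SharedConfigEnable,
}))
_ = sess
```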
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index b20979374..b542df931 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -125,6 +125,7 @@ var requiredSignedHeaders = rules{
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Expected-Bucket-Owner": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 46f1b0061..7ab65bae7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.45.7"
+const SDKVersion = "1.55.6"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
index 34a481afb..b1b686086 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -154,11 +154,11 @@ func (v ValueType) String() string {
// ValueType enums
const (
NoneType = ValueType(iota)
- DecimalType
- IntegerType
+ DecimalType // deprecated
+ IntegerType // deprecated
StringType
QuotedStringType
- BoolType
+ BoolType // deprecated
)
// Value is a union container
@@ -166,9 +166,9 @@ type Value struct {
Type ValueType
raw []rune
- integer int64
- decimal float64
- boolean bool
+ integer int64 // deprecated
+ decimal float64 // deprecated
+ boolean bool // deprecated
str string
}
@@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) {
}
token = newToken(TokenLit, b[:n], QuotedStringType)
- } else if isNumberValue(b) {
- var base int
- base, n, err = getNumericalValue(b)
- if err != nil {
- return token, 0, err
- }
-
- value := b[:n]
- vType := IntegerType
- if contains(value, '.') || hasExponent(value) {
- vType = DecimalType
- }
- token = newToken(TokenLit, value, vType)
- token.base = base
- } else if isBoolValue(b) {
- n, err = getBoolValue(b)
-
- token = newToken(TokenLit, b[:n], BoolType)
} else {
n, err = getValue(b)
token = newToken(TokenLit, b[:n], StringType)
@@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) {
}
// IntValue returns an integer value
-func (v Value) IntValue() int64 {
- return v.integer
+func (v Value) IntValue() (int64, bool) {
+ i, err := strconv.ParseInt(string(v.raw), 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return i, true
}
// FloatValue returns a float value
-func (v Value) FloatValue() float64 {
- return v.decimal
+func (v Value) FloatValue() (float64, bool) {
+ f, err := strconv.ParseFloat(string(v.raw), 64)
+ if err != nil {
+ return 0, false
+ }
+ return f, true
}
// BoolValue returns a bool value
-func (v Value) BoolValue() bool {
- return v.boolean
+func (v Value) BoolValue() (bool, bool) {
+ // we don't use ParseBool as it recognizes more than what we've
+ // historically supported
+ if isCaselessLitValue(runesTrue, v.raw) {
+ return true, true
+ } else if isCaselessLitValue(runesFalse, v.raw) {
+ return false, true
+ }
+ return false, false
}
func isTrimmable(r rune) bool {
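
With the numeric and bool branches removed from newLitToken, IntValue, FloatValue, and BoolValue above re-parse the raw runes on demand and signal success through a second return value. The integer path keeps the historical hex/octal prefix support because strconv.ParseInt is called with base 0; a small runnable illustration of what that accepts:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Mirrors IntValue's strconv.ParseInt(string(v.raw), 0, 64).
	for _, raw := range []string{"42", "0x2A", "052", "not-a-number"} {
		if i, err := strconv.ParseInt(raw, 0, 64); err == nil {
			fmt.Printf("%q -> (%d, true)\n", raw, i) // 42, hex 42, octal 42
		} else {
			fmt.Printf("%q -> (0, false)\n", raw) // mirrors the failed-parse return
		}
	}
}
```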
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
index 081cf4334..1d08e138a 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) {
}
// Bool returns a bool value at k
-func (t Section) Bool(k string) bool {
+func (t Section) Bool(k string) (bool, bool) {
return t.values[k].BoolValue()
}
// Int returns an integer value at k
-func (t Section) Int(k string) int64 {
+func (t Section) Int(k string) (int64, bool) {
return t.values[k].IntValue()
}
// Float64 returns a float value at k
-func (t Section) Float64(k string) float64 {
+func (t Section) Float64(k string) (float64, bool) {
return t.values[k].FloatValue()
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 058334053..2ca0b19db 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
}
func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
- // If it's empty, generate an empty value
- if !value.IsNil() && value.Len() == 0 {
+	// If it's empty and not EC2, generate an empty value
+ if !value.IsNil() && value.Len() == 0 && !q.isEC2 {
v.Set(prefix, "")
return nil
}
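
The guard above changes how a non-nil, zero-length list serializes: the generic query protocol still emits the list's key with an empty value, while the EC2 query protocol now omits it entirely. A small runnable sketch; the struct shape and its tag are illustrative, not a real API input:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

type params struct {
	Values []*string `type:"list"`
}

func main() {
	in := &params{Values: []*string{}} // non-nil, empty list

	body := url.Values{}
	_ = queryutil.Parse(body, in, false)     // query protocol
	fmt.Printf("query: %q\n", body.Encode()) // "Values="

	body = url.Values{}
	_ = queryutil.Parse(body, in, true)      // ec2 protocol, after this change
	fmt.Printf("ec2:   %q\n", body.Encode()) // ""
}
```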
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 2882d4556..f1fa8dcf0 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -67,19 +67,47 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
// AbortMultipartUpload API operation for Amazon Simple Storage Service.
//
-// This action aborts a multipart upload. After a multipart upload is aborted,
+// This operation aborts a multipart upload. After a multipart upload is aborted,
// no additional parts can be uploaded using that upload ID. The storage consumed
// by any previously uploaded parts will be freed. However, if any part uploads
// are currently in progress, those part uploads might or might not succeed.
// As a result, it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
-// To verify that all parts have been removed, so you don't get charged for
+// To verify that all parts have been removed and prevent getting charged for
// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// action and ensure that the parts list is empty.
+// API operation and ensure that the parts list is empty.
//
-// For information about permissions required to use the multipart upload, see
-// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. Amazon Web Services CLI or SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to AbortMultipartUpload:
//
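
A minimal sketch of the abort-then-verify flow described above, using the v1 client (svc is an existing *s3.S3; the bucket, key, and uploadID values are placeholders, and imports of "log", "github.com/aws/aws-sdk-go/aws", and "github.com/aws/aws-sdk-go/service/s3" are assumed):

```go
_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
	Bucket:   aws.String("amzn-s3-demo-bucket"), // placeholder
	Key:      aws.String("large-object"),        // placeholder
	UploadId: aws.String(uploadID),              // from CreateMultipartUpload
})
if err != nil {
	log.Fatal(err)
}

// Per the doc above: list the parts and ensure the list is empty, so no
// part storage is still being billed; re-abort if anything remains.
parts, err := svc.ListParts(&s3.ListPartsInput{
	Bucket:   aws.String("amzn-s3-demo-bucket"),
	Key:      aws.String("large-object"),
	UploadId: aws.String(uploadID),
})
if err == nil && len(parts.Parts) > 0 {
	log.Println("parts remain; abort the upload again")
}
```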
@@ -173,60 +201,93 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
//
// You first initiate the multipart upload and then upload all parts using the
// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
// operation. After successfully uploading all relevant parts of an upload,
-// you call this action to complete the upload. Upon receiving this request,
-// Amazon S3 concatenates all the parts in ascending order by part number to
-// create a new object. In the Complete Multipart Upload request, you must provide
-// the parts list. You must ensure that the parts list is complete. This action
-// concatenates the parts that you provide in the list. For each part in the
-// list, you must provide the part number and the ETag value, returned after
-// that part was uploaded.
-//
-// Processing of a Complete Multipart Upload request could take several minutes
-// to complete. After Amazon S3 begins processing the request, it sends an HTTP
+// you call this CompleteMultipartUpload operation to complete the upload. Upon
+// receiving this request, Amazon S3 concatenates all the parts in ascending
+// order by part number to create a new object. In the CompleteMultipartUpload
+// request, you must provide the parts list and ensure that the parts list is
+// complete. The CompleteMultipartUpload API operation concatenates the parts
+// that you provide in the list. For each part in the list, you must provide
+// the PartNumber value and the ETag value that are returned after that part
+// was uploaded.
+//
+// The processing of a CompleteMultipartUpload request could take several minutes
+// to finalize. After Amazon S3 begins processing the request, it sends an HTTP
// response header that specifies a 200 OK response. While processing is in
// progress, Amazon S3 periodically sends white space characters to keep the
// connection from timing out. A request could fail after the initial 200 OK
// response has been sent. This means that a 200 OK response can contain either
-// a success or an error. If you call the S3 API directly, make sure to design
-// your application to parse the contents of the response and handle it appropriately.
+// a success or an error. The error response might be embedded in the 200 OK
+// response. If you call this API operation directly, make sure to design your
+// application to parse the contents of the response and handle it appropriately.
// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs
// detect the embedded error and apply error handling per your configuration
// settings (including automatically retrying the request as appropriate). If
-// the condition persists, the SDKs throws an exception (or, for the SDKs that
-// don't use exceptions, they return the error).
+// the condition persists, the SDKs throw an exception (or, for the SDKs that
+// don't use exceptions, they return an error).
//
// Note that if CompleteMultipartUpload fails, applications should be prepared
-// to retry the failed requests. For more information, see Amazon S3 Error Best
-// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html).
+// to retry any failed requests (including 500 error responses). For more information,
+// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html).
//
-// You cannot use Content-Type: application/x-www-form-urlencoded with Complete
-// Multipart Upload requests. Also, if you do not provide a Content-Type header,
-// CompleteMultipartUpload returns a 200 OK response.
+// You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload
+// requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload
+// can still return a 200 OK response.
//
// For more information about multipart uploads, see Uploading Objects Using
-// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide.
//
-// For information about permissions required to use the multipart upload API,
-// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
//
-// CompleteMultipartUpload has the following special errors:
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. Amazon Web Services CLI or SDKs
+// create session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
//
-// - Error code: EntityTooSmall Description: Your proposed upload is smaller
+// Special errors
+//
+// - Error Code: EntityTooSmall Description: Your proposed upload is smaller
// than the minimum allowed object size. Each part must be at least 5 MB
-// in size, except the last part. 400 Bad Request
+// in size, except the last part. HTTP Status Code: 400 Bad Request
//
-// - Error code: InvalidPart Description: One or more of the specified parts
+// - Error Code: InvalidPart Description: One or more of the specified parts
// could not be found. The part might not have been uploaded, or the specified
-// entity tag might not have matched the part's entity tag. 400 Bad Request
+// ETag might not have matched the uploaded part's ETag. HTTP Status Code:
+// 400 Bad Request
//
-// - Error code: InvalidPartOrder Description: The list of parts was not
+// - Error Code: InvalidPartOrder Description: The list of parts was not
// in ascending order. The parts list must be specified in order by part
-// number. 400 Bad Request
+// number. HTTP Status Code: 400 Bad Request
//
-// - Error code: NoSuchUpload Description: The specified multipart upload
+// - Error Code: NoSuchUpload Description: The specified multipart upload
// does not exist. The upload ID might be invalid, or the multipart upload
-// might have been aborted or completed. 404 Not Found
+// might have been aborted or completed. HTTP Status Code: 404 Not Found
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to CompleteMultipartUpload:
//
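
A sketch of the parts-list requirement described above: each entry pairs the PartNumber with the ETag returned when that part was uploaded, in ascending order. Here svc, uploadID, and etags are assumed to exist from the earlier upload steps, and imports of "fmt", "log", "github.com/aws/aws-sdk-go/aws", and "github.com/aws/aws-sdk-go/service/s3" are assumed:

```go
completed := make([]*s3.CompletedPart, 0, len(etags))
for i, etag := range etags { // etags: one ETag per UploadPart response, in order
	completed = append(completed, &s3.CompletedPart{
		ETag:       aws.String(etag),
		PartNumber: aws.Int64(int64(i + 1)), // part numbers start at 1, ascending
	})
}
out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
	Bucket:          aws.String("amzn-s3-demo-bucket"), // placeholder
	Key:             aws.String("large-object"),        // placeholder
	UploadId:        aws.String(uploadID),
	MultipartUpload: &s3.CompletedMultipartUpload{Parts: completed},
})
if err != nil {
	log.Fatal(err) // per the doc above, errors can also arrive inside a 200 body
}
fmt.Println(aws.StringValue(out.Location))
```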
@@ -319,184 +380,113 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
// upload Upload Part - Copy (UploadPartCopy) API. For more information, see
// Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
//
-// All copy requests must be authenticated. Additionally, you must have read
-// access to the source object and write access to the destination bucket. For
-// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
-// Both the Region that you want to copy the object from and the Region that
-// you want to copy the object to must be enabled for your account.
-//
-// A copy request might return an error when Amazon S3 receives the copy request
-// or while Amazon S3 is copying the files. If the error occurs before the copy
-// action starts, you receive a standard Amazon S3 error. If the error occurs
-// during the copy operation, the error response is embedded in the 200 OK response.
-// This means that a 200 OK response can contain either a success or an error.
-// If you call the S3 API directly, make sure to design your application to
-// parse the contents of the response and handle it appropriately. If you use
-// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the
-// embedded error and apply error handling per your configuration settings (including
-// automatically retrying the request as appropriate). If the condition persists,
-// the SDKs throws an exception (or, for the SDKs that don't use exceptions,
-// they return the error).
-//
-// If the copy is successful, you receive a response with information about
-// the copied object.
-//
-// If the request is an HTTP 1.1 request, the response is chunk encoded. If
-// it were not, it would not contain the content-length, and you would need
-// to read the entire body.
+// You can copy individual objects between general purpose buckets, between
+// directory buckets, and between general purpose buckets and directory buckets.
//
-// The copy request charge is based on the storage class and Region that you
-// specify for the destination object. The request can also result in a data
-// retrieval charge for the source if the source storage class bills for data
-// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/).
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Both the Region that you want to copy the object from and the Region that
+// you want to copy the object to must be enabled for your account. For more
+// information about how to enable a Region for your account, see Enable or
+// disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone)
+// in the Amazon Web Services Account Management Guide.
//
// Amazon S3 transfer acceleration does not support cross-Region copies. If
// you request a cross-Region copy using a transfer acceleration endpoint, you
// get a 400 Bad Request error. For more information, see Transfer Acceleration
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
//
-// # Metadata
-//
-// When copying an object, you can preserve all metadata (the default) or specify
-// new metadata. However, the access control list (ACL) is not preserved and
-// is set to private for the user making the request. To override the default
-// ACL setting, specify a new ACL when generating a copy request. For more information,
-// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
-//
-// To specify whether you want the object metadata copied from the source object
-// or replaced with metadata provided in the request, you can optionally add
-// the x-amz-metadata-directive header. When you grant permissions, you can
-// use the s3:x-amz-metadata-directive condition key to enforce certain metadata
-// behavior when objects are uploaded. For more information, see Specifying
-// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
-// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition
-// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
-//
-// x-amz-website-redirect-location is unique to each object and must be specified
-// in the request headers to copy the value.
-//
-// x-amz-copy-source-if Headers
-//
-// To only copy an object under certain conditions, such as whether the Etag
-// matches or whether the object was modified before or after a specified date,
-// use the following request parameters:
-//
-// - x-amz-copy-source-if-match
-//
-// - x-amz-copy-source-if-none-match
-//
-// - x-amz-copy-source-if-unmodified-since
+// # Authentication and authorization
//
-// - x-amz-copy-source-if-modified-since
-//
-// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
-// headers are present in the request and evaluate as follows, Amazon S3 returns
-// 200 OK and copies the data:
-//
-// - x-amz-copy-source-if-match condition evaluates to true
-//
-// - x-amz-copy-source-if-unmodified-since condition evaluates to false
-//
-// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
-// headers are present in the request and evaluate as follows, Amazon S3 returns
-// the 412 Precondition Failed response code:
-//
-// - x-amz-copy-source-if-none-match condition evaluates to false
-//
-// - x-amz-copy-source-if-modified-since condition evaluates to true
-//
-// All headers with the x-amz- prefix, including x-amz-copy-source, must be
-// signed.
-//
-// # Server-side encryption
-//
-// Amazon S3 automatically encrypts all new objects that are copied to an S3
-// bucket. When copying an object, if you don't specify encryption information
-// in your copy request, the encryption setting of the target object is set
-// to the default encryption configuration of the destination bucket. By default,
-// all buckets have a base level of encryption configuration that uses server-side
-// encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket
-// has a default encryption configuration that uses server-side encryption with
-// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption
-// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
-// customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding
-// KMS key, or a customer-provided key to encrypt the target object copy.
-//
-// When you perform a CopyObject operation, if you want to use a different type
-// of encryption setting for the target object, you can use other appropriate
-// encryption-related headers to encrypt the target object with a KMS key, an
-// Amazon S3 managed key, or a customer-provided key. With server-side encryption,
-// Amazon S3 encrypts your data as it writes your data to disks in its data
-// centers and decrypts the data when you access it. If the encryption setting
-// in your request is different from the default encryption configuration of
-// the destination bucket, the encryption setting in your request takes precedence.
-// If the source object for the copy is stored in Amazon S3 using SSE-C, you
-// must provide the necessary encryption information in your request so that
-// Amazon S3 can decrypt the object for copying. For more information about
-// server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
-//
-// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the
-// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon S3 User Guide.
-//
-// # Access Control List (ACL)-Specific Request Headers
-//
-// When copying an object, you can optionally use headers to grant ACL-based
-// permissions. By default, all objects are private. Only the owner has full
-// access control. When adding a new object, you can grant permissions to individual
-// Amazon Web Services accounts or to predefined groups that are defined by
-// Amazon S3. These permissions are then added to the ACL on the object. For
-// more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
-//
-// If the bucket that you're copying objects to uses the bucket owner enforced
-// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
-// Buckets that use this setting only accept PUT requests that don't specify
-// an ACL or PUT requests that specify bucket owner full control ACLs, such
-// as the bucket-owner-full-control canned ACL or an equivalent form of this
-// ACL expressed in the XML format.
-//
-// For more information, see Controlling ownership of objects and disabling
-// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide.
-//
-// If your bucket uses the bucket owner enforced setting for Object Ownership,
-// all objects written to the bucket by any account will be owned by the bucket
-// owner.
+// All CopyObject requests must be authenticated and signed by using IAM credentials
+// (access key ID and secret access key for the IAM identities). All headers
+// with the x-amz- prefix, including x-amz-copy-source, must be signed. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
-// # Checksums
+// Directory buckets - You must use the IAM credentials to authenticate and
+// authorize your access to the CopyObject API operation, instead of using the
+// temporary security credentials through the CreateSession API operation.
//
-// When copying an object, if it has a checksum, that checksum will be copied
-// to the new object by default. When you copy the object over, you can optionally
-// specify a different checksum algorithm to use with the x-amz-checksum-algorithm
-// header.
+// The Amazon Web Services CLI and SDKs handle authentication and authorization
+// on your behalf.
//
-// # Storage Class Options
+// # Permissions
//
-// You can use the CopyObject action to change the storage class of an object
-// that is already stored in Amazon S3 by using the StorageClass parameter.
-// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
-// in the Amazon S3 User Guide.
+// You must have read access to the source object and write access to the destination
+// bucket.
//
-// If the source object's storage class is GLACIER, you must restore a copy
-// of this object before you can use it as a source object for the copy operation.
-// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
-// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html).
+// - General purpose bucket permissions - You must have permissions in an
+// IAM policy based on the source and destination bucket types in a CopyObject
+// operation. If the source object is in a general purpose bucket, you must
+// have s3:GetObject permission to read the source object that is being copied.
+// If the destination bucket is a general purpose bucket, you must have s3:PutObject
+// permission to write the object copy to the destination bucket.
+//
+// - Directory bucket permissions - You must have permissions in a bucket
+// policy or an IAM identity-based policy based on the source and destination
+// bucket types in a CopyObject operation. If the source object that you
+// want to copy is in a directory bucket, you must have the s3express:CreateSession
+// permission in the Action element of a policy to read the object. By default,
+// the session is in the ReadWrite mode. If you want to restrict the access,
+// you can explicitly set the s3express:SessionMode condition key to ReadOnly
+// on the copy source bucket. If the copy destination is a directory bucket,
+// you must have the s3express:CreateSession permission in the Action element
+// of a policy to write the object to the destination. The s3express:SessionMode
+// condition key can't be set to ReadOnly on the copy destination bucket.
+// For example policies, see Example bucket policies for S3 Express One Zone
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
//
-// # Versioning
+// # Response and special errors
+//
+// When the request is an HTTP 1.1 request, the response is chunk encoded. When
+// the request is not an HTTP 1.1 request, the response doesn't contain the
+// Content-Length header. You always need to read the entire response body to
+// check whether the copy succeeded.
+//
+// - If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// - A copy request might return an error when Amazon S3 receives the copy
+// request or while Amazon S3 is copying the files. A 200 OK response can
+// contain either a success or an error. If the error occurs before the copy
+// action starts, you receive a standard Amazon S3 error. If the error occurs
+// during the copy operation, the error response is embedded in the 200 OK
+// response. For example, in a cross-region copy, you may encounter throttling
+// and receive a 200 OK response. For more information, see Resolve the Error
+// 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror).
+// The 200 OK status code means the copy was accepted, but it doesn't mean
+// the copy is complete. Another example: if you disconnect from Amazon
+// S3 before the copy is complete, Amazon S3 might cancel the copy, and you
+// may receive a 200 OK response. You must stay connected to Amazon S3 until
+// the entire response is successfully received and processed. If you call
+// this API operation directly, make sure to design your application to parse
+// the content of the response and handle it appropriately (see the error-handling
+// sketch after this list). If you use Amazon
+// Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded
+// error and apply error handling per your configuration settings (including
+// automatically retrying the request as appropriate). If the condition persists,
+// the SDKs throw an exception (or, for the SDKs that don't use exceptions,
+// they return an error).
+//
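+// A minimal error-handling sketch for the embedded-error case above (illustrative
+// only, given an *s3.S3 client svc; this SDK surfaces the embedded error through
+// the returned error value):
+//
+//     _, err := svc.CopyObject(input)
+//     if err != nil {
+//         if aerr, ok := err.(awserr.Error); ok {
+//             // aerr.Code() and aerr.Message() carry the service error, including
+//             // an error embedded in a 200 OK response body.
+//             log.Printf("copy failed: %s: %s", aerr.Code(), aerr.Message())
+//         }
+//     }
+//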
+// # Charge
//
-// By default, x-amz-copy-source header identifies the current version of an
-// object to copy. If the current version is a delete marker, Amazon S3 behaves
-// as if the object was deleted. To copy a different version, use the versionId
-// subresource.
+// The copy request charge is based on the storage class and Region that you
+// specify for the destination object. The request can also result in a data
+// retrieval charge for the source if the source storage class bills for data
+// retrieval. If the copy source is in a different region, the data transfer
+// is billed to the copy source account. For pricing information, see Amazon
+// S3 pricing (http://aws.amazon.com/s3/pricing/).
//
-// If you enable versioning on the target bucket, Amazon S3 generates a unique
-// version ID for the object being copied. This version ID is different from
-// the version ID of the source object. Amazon S3 returns the version ID of
-// the copied object in the x-amz-version-id response header in the response.
+// # HTTP Host header syntax
//
-// If you do not enable versioning or suspend it on the target bucket, the version
-// ID that Amazon S3 generates is always null.
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to CopyObject:
//
@@ -581,77 +571,97 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
// CreateBucket API operation for Amazon Simple Storage Service.
//
-// Creates a new S3 bucket. To create a bucket, you must register with Amazon
-// S3 and have a valid Amazon Web Services Access Key ID to authenticate requests.
+// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts
+// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html).
+//
+// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and
+// have a valid Amazon Web Services Access Key ID to authenticate requests.
// Anonymous requests are never allowed to create buckets. By creating the bucket,
// you become the bucket owner.
//
-// Not every string is an acceptable bucket name. For information about bucket
-// naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html).
-//
-// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html).
-//
-// By default, the bucket is created in the US East (N. Virginia) Region. You
-// can optionally specify a Region in the request body. You might choose a Region
-// to optimize latency, minimize costs, or address regulatory requirements.
-// For example, if you reside in Europe, you will probably find it advantageous
-// to create buckets in the Europe (Ireland) Region. For more information, see
-// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
-//
-// If you send your create bucket request to the s3.amazonaws.com endpoint,
-// the request goes to the us-east-1 Region. Accordingly, the signature calculations
-// in Signature Version 4 must use us-east-1 as the Region, even if the location
-// constraint in the request specifies another Region where the bucket is to
-// be created. If you create a bucket in a Region other than US East (N. Virginia),
-// your application must be able to handle 307 redirect. For more information,
-// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html).
+// There are two types of buckets: general purpose buckets and directory buckets.
+// For more information about these bucket types, see Creating, configuring,
+// and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html)
+// in the Amazon S3 User Guide.
//
-// # Permissions
+// - General purpose buckets - If you send your CreateBucket request to the
+// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region.
+// So the signature calculations in Signature Version 4 must use us-east-1
+// as the Region, even if the location constraint in the request specifies
+// another Region where the bucket is to be created. If you create a bucket
+// in a Region other than US East (N. Virginia), your application must be
+// able to handle a 307 redirect. For more information, see Virtual hosting
+// of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)
+// in the Amazon S3 User Guide. A request sketch follows this list.
//
-// In addition to s3:CreateBucket, the following permissions are required when
-// your CreateBucket request includes specific headers:
-//
-// - Access control lists (ACLs) - If your CreateBucket request specifies
-// access control list (ACL) permissions and the ACL is public-read, public-read-write,
-// authenticated-read, or if you specify access permissions explicitly through
-// any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are
-// needed. If the ACL for the CreateBucket request is private or if the request
-// doesn't specify any ACLs, only s3:CreateBucket permission is needed.
-//
-// - Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket
-// request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning
-// permissions are required.
-//
-// - S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership
-// header, then the s3:PutBucketOwnershipControls permission is required.
-// By default, ObjectOwnership is set to BucketOWnerEnforced and ACLs are
-// disabled. We recommend keeping ACLs disabled, except in uncommon use cases
-// where you must control access for each object individually. If you want
-// to change the ObjectOwnership setting, you can use the x-amz-object-ownership
-// header in your CreateBucket request to set the ObjectOwnership setting
-// of your choice. For more information about S3 Object Ownership, see Controlling
-// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name.
+// Virtual-hosted-style requests aren't supported. For more information,
+// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
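+// A request sketch for the Region behavior above (illustrative only; the bucket
+// name and Region are placeholders, and sess is a configured session):
+//
+//     svc := s3.New(sess, &aws.Config{Region: aws.String("eu-west-1")})
+//     _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//         Bucket: aws.String("example-bucket"), // placeholder
+//         CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//             // Outside us-east-1, the location constraint must name the Region.
+//             LocationConstraint: aws.String("eu-west-1"),
+//         },
+//     })
+//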
-// - S3 Block Public Access - If your specific use case requires granting
-// public access to your S3 resources, you can disable Block Public Access.
-// You can create a new bucket with Block Public Access enabled, then separately
-// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
+// Permissions
+//
+// - General purpose bucket permissions - In addition to the s3:CreateBucket
+// permission, the following permissions are required in a policy when your
+// CreateBucket request includes specific headers:
+//
+// - Access control lists (ACLs) - In your CreateBucket request, if you specify
+// an access control list (ACL) and set it to public-read, public-read-write,
+// authenticated-read,
+// or if you explicitly specify any other custom ACLs, both s3:CreateBucket
+// and s3:PutBucketAcl permissions are required. In your CreateBucket request,
+// if you set the ACL to private, or if you don't specify any ACLs, only
+// the s3:CreateBucket permission is required.
+//
+// - Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled
+// to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning
+// permissions are required.
+//
+// - S3 Object Ownership - If your CreateBucket request includes the
+// x-amz-object-ownership header,
+// then the s3:PutBucketOwnershipControls permission is required. To set
+// an ACL on a bucket as part of a CreateBucket request, you must explicitly
+// set S3 Object Ownership for the bucket to a different value than the default,
+// BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public
+// access, you must first create the bucket (without the bucket ACL) and
+// then explicitly disable Block Public Access on the bucket before using
+// PutBucketAcl to set the ACL. If you try to create a bucket with a public
+// ACL, the request will fail. For the majority of modern use cases in S3,
+// we recommend that you keep all Block Public Access settings enabled and
+// keep ACLs disabled. If you would like to share data with users outside
+// of your account, you can use bucket policies as needed. For more information,
+// see Controlling ownership of objects and disabling ACLs for your bucket
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)
+// in the Amazon S3 User Guide.
+//
+// - S3 Block Public Access - If your specific
+// use case requires granting public access to your S3 resources, you can
+// disable Block Public Access. Specifically, you can create a new bucket
+// with Block Public Access enabled, then separately call the DeletePublicAccessBlock
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
-// permission. By default, all Block Public Access settings are enabled for
-// new buckets. To avoid inadvertent exposure of your resources, we recommend
-// keeping the S3 Block Public Access settings enabled. For more information
-// about S3 Block Public Access, see Blocking public access to your Amazon
-// S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// permission. For more information about S3 Block Public Access, see Blocking
+// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)
+// in the Amazon S3 User Guide. (A sketch of this flow follows this list.)
//
-// If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object
-// Ownership and specifies a bucket ACL that provides access to an external
-// Amazon Web Services account, your request fails with a 400 error and returns
-// the InvalidBucketAcLWithObjectOwnership error code. For more information,
-// see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html)
-// in the Amazon S3 User Guide.
+// - Directory bucket permissions - You must have the s3express:CreateBucket
+// permission in an IAM identity-based policy instead of a bucket policy.
+// Cross-account access to this API operation isn't supported. This operation
+// can only be performed by the Amazon Web Services account that owns the
+// resource. For more information about directory bucket policies and permissions,
+// see Amazon Web Services Identity and Access Management (IAM) for S3 Express
+// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3
+// Object Ownership, and S3 Block Public Access are not supported for directory
+// buckets. For directory buckets, all Block Public Access settings are enabled
+// at the bucket level and S3 Object Ownership is set to Bucket owner enforced
+// (ACLs disabled). These settings can't be modified. For more information
+// about permissions for creating and working with directory buckets, see
+// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide. For more information about supported S3 features
+// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features)
+// in the Amazon S3 User Guide.
+//
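+// A sketch of the create-then-unblock flow for public bucket ACLs described
+// above (illustrative only; the bucket name is a placeholder, error handling
+// is elided, and svc is an *s3.S3 client):
+//
+//     _, _ = svc.CreateBucket(&s3.CreateBucketInput{
+//         Bucket:          aws.String("example-bucket"),
+//         ObjectOwnership: aws.String(s3.ObjectOwnershipObjectWriter), // ACLs must not be disabled
+//     })
+//     _, _ = svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
+//         Bucket: aws.String("example-bucket"),
+//     })
+//     _, _ = svc.PutBucketAcl(&s3.PutBucketAclInput{
+//         Bucket: aws.String("example-bucket"),
+//         ACL:    aws.String(s3.BucketCannedACLPublicRead),
+//     })
+//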
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
//
// The following operations are related to CreateBucket:
//
@@ -749,164 +759,139 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
// You specify this upload ID in each of your subsequent upload part requests
// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)).
// You also include this upload ID in the final request to either complete or
-// abort the multipart upload request.
+// abort the multipart upload request. For more information about multipart
+// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
+// in the Amazon S3 User Guide.
//
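+// A compressed lifecycle sketch (illustrative only; bucket, key, and part data
+// are placeholders, error handling is elided, and svc is an *s3.S3 client):
+//
+//     mpu, _ := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//         Bucket: aws.String("example-bucket"), Key: aws.String("example-key"),
+//     })
+//     part, _ := svc.UploadPart(&s3.UploadPartInput{
+//         Bucket: aws.String("example-bucket"), Key: aws.String("example-key"),
+//         UploadId: mpu.UploadId, PartNumber: aws.Int64(1),
+//         Body: strings.NewReader("example part data"),
+//     })
+//     _, _ = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//         Bucket: aws.String("example-bucket"), Key: aws.String("example-key"),
+//         UploadId: mpu.UploadId,
+//         MultipartUpload: &s3.CompletedMultipartUpload{
+//             Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//         },
+//     })
+//
+// Calling AbortMultipartUpload with the same UploadId instead of
+// CompleteMultipartUpload discards the uploaded parts.
+//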
-// For more information about multipart uploads, see Multipart Upload Overview
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html).
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or
+// abort the multipart upload. Amazon S3 frees up the space used to store the
+// parts and stops charging you for storing them only after you either complete
+// or abort a multipart upload.
//
// If you have configured a lifecycle rule to abort incomplete multipart uploads,
-// the upload must complete within the number of days specified in the bucket
-// lifecycle configuration. Otherwise, the incomplete multipart upload becomes
-// eligible for an abort action and Amazon S3 aborts the multipart upload. For
-// more information, see Aborting Incomplete Multipart Uploads Using a Bucket
-// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+// the created multipart upload must be completed within the number of days
+// specified in the bucket lifecycle configuration. Otherwise, the incomplete
+// multipart upload becomes eligible for an abort action and Amazon S3 aborts
+// the multipart upload. For more information, see Aborting Incomplete Multipart
+// Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+//
+// - Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// For information about the permissions required to use the multipart upload
-// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// # Request signing
//
// For request signing, multipart upload is just a series of regular requests.
// You initiate a multipart upload, send one or more requests to upload parts,
// and then complete the multipart upload process. You sign each request individually.
// There is nothing special about signing multipart upload requests. For more
// information about signing, see Authenticating Requests (Amazon Web Services
-// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
-//
-// After you initiate a multipart upload and upload one or more parts, to stop
-// being charged for storing the uploaded parts, you must either complete or
-// abort the multipart upload. Amazon S3 frees up the space used to store the
-// parts and stop charging you for storing them only after you either complete
-// or abort a multipart upload.
-//
-// Server-side encryption is for data encryption at rest. Amazon S3 encrypts
-// your data as it writes it to disks in its data centers and decrypts it when
-// you access it. Amazon S3 automatically encrypts all new objects that are
-// uploaded to an S3 bucket. When doing a multipart upload, if you don't specify
-// encryption information in your request, the encryption setting of the uploaded
-// parts is set to the default encryption configuration of the destination bucket.
-// By default, all buckets have a base level of encryption configuration that
-// uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the
-// destination bucket has a default encryption configuration that uses server-side
-// encryption with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided
-// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided
-// key to encrypt the uploaded parts. When you perform a CreateMultipartUpload
-// operation, if you want to use a different type of encryption setting for
-// the uploaded parts, you can request that Amazon S3 encrypts the object with
-// a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption
-// setting in your request is different from the default encryption configuration
-// of the destination bucket, the encryption setting in your request takes precedence.
-// If you choose to provide your own encryption key, the request headers you
-// provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
-// requests must match the headers you used in the request to initiate the upload
-// by using CreateMultipartUpload. You can request that Amazon S3 save the uploaded
-// parts encrypted with server-side encryption with an Amazon S3 managed key
-// (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a customer-provided
-// encryption key (SSE-C).
-//
-// To perform a multipart upload with encryption by using an Amazon Web Services
-// KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey*
-// actions on the key. These permissions are required because Amazon S3 must
-// decrypt and read data from the encrypted file parts before it completes the
-// multipart upload. For more information, see Multipart upload API and permissions
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
-// and Protecting data using server-side encryption with Amazon Web Services
-// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
// in the Amazon S3 User Guide.
//
-// If your Identity and Access Management (IAM) user or role is in the same
-// Amazon Web Services account as the KMS key, then you must have these permissions
-// on the key policy. If your IAM user or role belongs to a different account
-// than the key, then you must have the permissions on both the key policy and
-// your IAM user or role.
-//
-// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
-//
-// # Access Permissions
-//
-// When copying an object, you can optionally specify the accounts or groups
-// that should be granted specific permissions on the new object. There are
-// two ways to grant the permissions using the request headers:
-//
-// - Specify a canned ACL with the x-amz-acl request header. For more information,
-// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
-//
-// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
-// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
-// map to the set of permissions that Amazon S3 supports in an ACL. For more
-// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
-//
-// You can use either a canned ACL or specify access permissions explicitly.
-// You cannot do both.
-//
-// # Server-Side- Encryption-Specific Request Headers
-//
-// Amazon S3 encrypts data by using server-side encryption with an Amazon S3
-// managed key (SSE-S3) by default. Server-side encryption is for data encryption
-// at rest. Amazon S3 encrypts your data as it writes it to disks in its data
-// centers and decrypts it when you access it. You can request that Amazon S3
-// encrypts data at rest by using server-side encryption with other key options.
-// The option you use depends on whether you want to use KMS keys (SSE-KMS)
-// or provide your own encryption keys (SSE-C).
+// Permissions
+//
+// - General purpose bucket permissions - For information about the permissions
+// required to use the multipart upload API, see Multipart upload and permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide. To perform a multipart upload with encryption
+// by using an Amazon Web Services KMS key, the requester must have permission
+// to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These
+// permissions are required because Amazon S3 must decrypt and read data
+// from the encrypted file parts before it completes the multipart upload.
+// For more information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
+// and Protecting data using server-side encryption with Amazon Web Services
+// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide.
//
-// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create the session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Encryption
+//
+// - General purpose buckets - Server-side encryption is for data encryption
+// at rest. Amazon S3 encrypts your data as it writes it to disks in its
+// data centers and decrypts it when you access it. Amazon S3 automatically
+// encrypts all new objects that are uploaded to an S3 bucket. When doing
+// a multipart upload, if you don't specify encryption information in your
+// request, the encryption setting of the uploaded parts is set to the default
+// encryption configuration of the destination bucket. By default, all buckets
+// have a base level of encryption configuration that uses server-side encryption
+// with Amazon S3 managed keys (SSE-S3). If the destination bucket has a
+// default encryption configuration that uses server-side encryption with
+// a Key Management Service (KMS) key (SSE-KMS), or a customer-provided
+// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a
+// customer-provided key to encrypt the uploaded parts. When you perform
+// a CreateMultipartUpload operation, if you want to use a different type
+// of encryption setting for the uploaded parts, you can request that Amazon
+// S3 encrypts the object with a different encryption key (such as an Amazon
+// S3 managed key, a KMS key, or a customer-provided key). When the encryption
+// setting in your request is different from the default encryption configuration
+// of the destination bucket, the encryption setting in your request takes
+// precedence. If you choose to provide your own encryption key, the request
+// headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// requests must match the headers you used in the CreateMultipartUpload
+// request.
+//
+// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed
// key (aws/s3) and KMS customer managed keys stored in Key Management Service
// (KMS) – If you want Amazon Web Services to manage the keys used to encrypt
// data, specify the following headers in the request. x-amz-server-side-encryption
// x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context
// If you specify x-amz-server-side-encryption:aws:kms, but don't provide
// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon
-// Web Services managed key (aws/s3 key) in KMS to protect the data. All
-// GET and PUT requests for an object protected by KMS fail if you don't
-// make them by using Secure Sockets Layer (SSL), Transport Layer Security
-// (TLS), or Signature Version 4. For more information about server-side
-// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side
-// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html).
-//
-// - Use customer-provided encryption keys (SSE-C) – If you want to manage
-// your own encryption keys, provide all the following headers in the request.
-// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key
-// x-amz-server-side-encryption-customer-key-MD5 For more information about
-// server-side encryption with customer-provided encryption keys (SSE-C),
-// see Protecting data using server-side encryption with customer-provided
-// encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
-//
-// # Access-Control-List (ACL)-Specific Request Headers
-//
-// You also can use the following access control–related headers with this
-// operation. By default, all objects are private. Only the owner has full access
-// control. When adding a new object, you can grant permissions to individual
-// Amazon Web Services accounts or to predefined groups defined by Amazon S3.
-// These permissions are then added to the access control list (ACL) on the
-// object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
-// With this operation, you can grant access permissions using one of the following
-// two methods:
-//
-// - Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined
-// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
-// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
-//
-// - Specify access permissions explicitly — To explicitly grant access
-// permissions to specific Amazon Web Services accounts or groups, use the
-// following headers. Each header maps to specific permissions that Amazon
-// S3 supports in an ACL. For more information, see Access Control List (ACL)
-// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
-// In the header, you specify a list of grantees who get the specific permission.
-// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write
-// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You
-// specify each grantee as a type=value pair, where the type is one of the
-// following: id – if the value specified is the canonical user ID of an
-// Amazon Web Services account uri – if you are granting permissions to
-// a predefined group emailAddress – if the value specified is the email
-// address of an Amazon Web Services account Using email addresses to specify
-// a grantee is only supported in the following Amazon Web Services Regions:
-// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
-// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
-// South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference. For example, the following
-// x-amz-grant-read header grants the Amazon Web Services accounts identified
-// by account IDs permissions to read object data and its metadata: x-amz-grant-read:
-// id="11112222333", id="444455556666"
+// Web Services managed key (aws/s3 key) in KMS to protect the data. To perform
+// a multipart upload with encryption by using an Amazon Web Services KMS
+// key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey*
+// actions on the key. These permissions are required because Amazon S3 must
+// decrypt and read data from the encrypted file parts before it completes
+// the multipart upload. For more information, see Multipart upload API and
+// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
+// and Protecting data using server-side encryption with Amazon Web Services
+// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide. If your Identity and Access Management (IAM)
+// user or role is in the same Amazon Web Services account as the KMS key,
+// then you must have these permissions on the key policy. If your IAM user
+// or role is in a different account from the key, then you must have the
+// permissions on both the key policy and your IAM user or role. All GET
+// and PUT requests for an object protected by KMS fail if you don't make
+// them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS),
+// or Signature Version 4. For information about configuring any of the officially
+// supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying
+// the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+// in the Amazon S3 User Guide. For more information about server-side encryption
+// with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption
+// with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide.
+//
+// - Use customer-provided encryption keys (SSE-C)
+// – If you want to manage your own encryption keys, provide all the following
+// headers in the request. x-amz-server-side-encryption-customer-algorithm
+// x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5
+// For more information about server-side encryption with customer-provided
+// encryption keys (SSE-C), see Protecting data using server-side encryption
+// with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon S3 User Guide. An encryption-header sketch follows this list.
+//
+// - Directory buckets - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+//
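+// A header-mapping sketch for the SSE-KMS case above (illustrative only; the
+// KMS key ARN is a placeholder, and svc is an *s3.S3 client). In this SDK,
+// the x-amz-server-side-encryption* headers map to input fields:
+//
+//     out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//         Bucket:               aws.String("example-bucket"),
+//         Key:                  aws.String("example-key"),
+//         ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms), // x-amz-server-side-encryption
+//         SSEKMSKeyId:          aws.String("arn:aws:kms:..."),             // placeholder key ARN
+//     })
+//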
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to CreateMultipartUpload:
//
@@ -948,6 +933,152 @@ func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMult
return out, req.Send()
}
+const opCreateSession = "CreateSession"
+
+// CreateSessionRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSession operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateSession for more information on using the CreateSession
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the CreateSessionRequest method.
+// req, resp := client.CreateSessionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession
+func (c *S3) CreateSessionRequest(input *CreateSessionInput) (req *request.Request, output *CreateSessionOutput) {
+ op := &request.Operation{
+ Name: opCreateSession,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?session",
+ }
+
+ if input == nil {
+ input = &CreateSessionInput{}
+ }
+
+ output = &CreateSessionOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateSession API operation for Amazon Simple Storage Service.
+//
+// Creates a session that establishes temporary security credentials to support
+// fast authentication and authorization for the Zonal endpoint APIs on directory
+// buckets. For more information about Zonal endpoint APIs that include the
+// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html)
+// in the Amazon S3 User Guide.
+//
+// To make Zonal endpoint API requests on a directory bucket, use the CreateSession
+// API operation. Specifically, you grant s3express:CreateSession permission
+// to a bucket in a bucket policy or an IAM identity-based policy. Then, you
+// use IAM credentials to make the CreateSession API request on the bucket,
+// which returns temporary security credentials that include the access key
+// ID, secret access key, session token, and expiration. These credentials have
+// associated permissions to access the Zonal endpoint APIs. After the session
+// is created, you don’t need to use other policies to grant permissions to
+// each Zonal endpoint API individually. Instead, in your Zonal endpoint API
+// requests, you sign your requests by applying the temporary security credentials
+// of the session to the request headers and following the SigV4 protocol for
+// authentication. You also apply the session token to the x-amz-s3session-token
+// request header for authorization. Temporary security credentials are scoped
+// to the bucket and expire after 5 minutes. After the expiration time, any
+// calls that you make with those credentials will fail. You must use IAM credentials
+// again to make a CreateSession API request that generates a new set of temporary
+// credentials for use. Temporary credentials cannot be extended or refreshed
+// beyond the original specified interval.
+//
+// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes
+// automatically to avoid service interruptions when a session expires. We recommend
+// that you use the Amazon Web Services SDKs to initiate and manage requests
+// to the CreateSession API. For more information, see Performance guidelines
+// and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication)
+// in the Amazon S3 User Guide.
+//
+// - You must make requests for this API operation to the Zonal endpoint.
+// These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject
+// API operation doesn't use the temporary security credentials returned
+// from the CreateSession API operation for authentication and authorization.
+// For information about authentication and authorization of the CopyObject
+// API operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html).
+//
+// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket
+// API operation doesn't use the temporary security credentials returned
+// from the CreateSession API operation for authentication and authorization.
+// For information about authentication and authorization of the HeadBucket
+// API operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html).
+//
+// # Permissions
+//
+// To obtain temporary security credentials, you must create a bucket policy
+// or an IAM identity-based policy that grants s3express:CreateSession permission
+// to the bucket. In a policy, you can use the s3express:SessionMode condition
+// key to control who can create a ReadWrite or ReadOnly session. For more information
+// about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters).
+// For example policies, see Example bucket policies for S3 Express One Zone
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
+//
+// To grant cross-account access to Zonal endpoint APIs, the bucket policy should
+// also grant both accounts the s3express:CreateSession permission.
+//
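+// A minimal usage sketch of the flow above (illustrative only; the directory
+// bucket name is a placeholder, svc is an *s3.S3 client, and the input/output
+// shapes are the ones this patch adds to this file):
+//
+//     out, err := svc.CreateSession(&s3.CreateSessionInput{
+//         Bucket:      aws.String("example--usw2-az1--x-s3"), // placeholder directory bucket
+//         SessionMode: aws.String(s3.SessionModeReadWrite),
+//     })
+//     if err == nil {
+//         // out.Credentials holds the temporary access key ID, secret access key,
+//         // session token, and expiration for subsequent Zonal endpoint requests.
+//     }
+//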
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateSession for usage and error information.
+//
+// Returned Error Codes:
+// - ErrCodeNoSuchBucket "NoSuchBucket"
+// The specified bucket does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession
+func (c *S3) CreateSession(input *CreateSessionInput) (*CreateSessionOutput, error) {
+ req, out := c.CreateSessionRequest(input)
+ return out, req.Send()
+}
+
+// CreateSessionWithContext is the same as CreateSession with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateSession for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateSessionWithContext(ctx aws.Context, input *CreateSessionInput, opts ...request.Option) (*CreateSessionOutput, error) {
+ req, out := c.CreateSessionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
const opDeleteBucket = "DeleteBucket"
// DeleteBucketRequest generates a "aws/request.Request" representing the
@@ -995,6 +1126,35 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
// Deletes the S3 bucket. All objects (including all object versions and delete
// markers) in the bucket must be deleted before the bucket itself can be deleted.
//
+// - Directory buckets - If multipart uploads in a directory bucket are in
+// progress, you can't delete the bucket until all the in-progress multipart
+// uploads are aborted or completed.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name.
+// Virtual-hosted-style requests aren't supported. For more information,
+// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - You must have the s3:DeleteBucket
+// permission on the specified bucket in a policy.
+//
+// - Directory bucket permissions - You must have the s3express:DeleteBucket
+// permission in an IAM identity-based policy instead of a bucket policy.
+// Cross-account access to this API operation isn't supported. This operation
+// can only be performed by the Amazon Web Services account that owns the
+// resource. For more information about directory bucket policies and permissions,
+// see Amazon Web Services Identity and Access Management (IAM) for S3 Express
+// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
+//
// The following operations are related to DeleteBucket:
//
// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
@@ -1073,6 +1233,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes an analytics configuration for the bucket (specified by the analytics
// configuration ID).
//
@@ -1165,6 +1327,8 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
// DeleteBucketCors API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the cors configuration information set for the bucket.
//
// To use this operation, you must have permission to perform the s3:PutBucketCORS
@@ -1252,6 +1416,8 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (
// DeleteBucketEncryption API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the DELETE action resets the default encryption for
// the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3).
// For information about the bucket default encryption feature, see Amazon S3
@@ -1343,6 +1509,8 @@ func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBuc
// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
//
// The S3 Intelligent-Tiering storage class is designed to optimize storage
@@ -1442,6 +1610,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes an inventory configuration (identified by the inventory ID) from
// the bucket.
//
@@ -1534,6 +1704,8 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the lifecycle configuration from the specified bucket. Amazon S3
// removes all the lifecycle configuration rules in the lifecycle subresource
// associated with the bucket. Your objects never expire, and Amazon S3 no longer
@@ -1628,6 +1800,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes a metrics configuration for the Amazon CloudWatch request metrics
// (specified by the metrics configuration ID) from the bucket. Note that this
// doesn't include the daily storage metrics.
@@ -1723,6 +1897,8 @@ func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipCo
// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Removes OwnershipControls for an Amazon S3 bucket. To use this operation,
// you must have the s3:PutBucketOwnershipControls permission. For more information
// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
@@ -1808,11 +1984,21 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
//
-// This implementation of the DELETE action uses the policy subresource to delete
-// the policy of a specified bucket. If you are using an identity other than
-// the root user of the Amazon Web Services account that owns the bucket, the
-// calling identity must have the DeleteBucketPolicy permissions on the specified
-// bucket and belong to the bucket owner's account to use this operation.
+// Deletes the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name.
+// Virtual-hosted-style requests aren't supported. For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// If you are using an identity other than the root user of the Amazon Web Services
+// account that owns the bucket, the calling identity must both have the DeleteBucketPolicy
+// permissions on the specified bucket and belong to the bucket owner's account
+// in order to use this operation.
//
// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403
// Access Denied error. If you have the correct permissions, but you're not
@@ -1827,8 +2013,23 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
// these API actions by VPC endpoint policies and Amazon Web Services Organizations
// policies.
//
-// For more information about bucket policies, see Using Bucket Policies and
-// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission
+// is required in a policy. For more information about general purpose buckets
+// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation,
+// you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web
+// Services account that owns the resource. For more information about directory
+// bucket policies and permissions, see Amazon Web Services Identity and
+// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
//
// The following operations are related to DeleteBucketPolicy
//
@@ -1908,6 +2109,8 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
// DeleteBucketReplication API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the replication configuration from the bucket.
//
// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration
@@ -2000,6 +2203,8 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
// DeleteBucketTagging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Deletes the tags from the bucket.
//
// To use this operation, you must have permission to perform the s3:PutBucketTagging
@@ -2084,6 +2289,8 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This action removes the website configuration for a bucket. Amazon S3 returns
// a 200 OK response upon successfully deleting a website configuration on the
// specified bucket. You will get a 200 OK response if the website configuration
@@ -2176,31 +2383,88 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
// DeleteObject API operation for Amazon Simple Storage Service.
//
-// Removes the null version (if there is one) of an object and inserts a delete
-// marker, which becomes the latest version of the object. If there isn't a
-// null version, Amazon S3 does not remove any objects but will still respond
-// that the command was successful.
+// Removes an object from a bucket. The behavior depends on the bucket's versioning
+// state:
+//
+// - If bucket versioning is not enabled, the operation permanently deletes
+// the object.
+//
+// - If bucket versioning is enabled, the operation inserts a delete marker,
+// which becomes the current version of the object. To permanently delete
+// an object in a versioned bucket, you must include the object’s versionId
+// in the request. For more information about versioning-enabled buckets,
+// see Deleting object versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html).
+//
+// - If bucket versioning is suspended, the operation removes the object
+// that has a null versionId, if there is one, and inserts a delete marker
+// that becomes the current version of the object. If there isn't an object
+// with a null versionId, and all versions of the object have a versionId,
+// Amazon S3 does not remove the object and only inserts a delete marker.
+// To permanently delete an object that has a versionId, you must include
+// the object’s versionId in the request. For more information about versioning-suspended
+// buckets, see Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html).
+//
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets. For this API operation, only the null value of the version ID
+// is supported by directory buckets. You can only specify null for the versionId
+// query parameter in the request.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// To remove a specific version, you must use the version Id subresource. Using
-// this subresource permanently deletes the version. If the object deleted is
-// a delete marker, Amazon S3 sets the response header, x-amz-delete-marker,
+// To remove a specific version, you must use the versionId query parameter.
+// Using this query parameter permanently deletes the version. If the object
+// deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker
// to true.
//
// If the object you want to delete is in a bucket where the bucket versioning
// configuration is MFA Delete enabled, you must include the x-amz-mfa request
// header in the DELETE versionId request. Requests that include x-amz-mfa must
-// use HTTPS.
+// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html)
+// in the Amazon S3 User Guide. To see sample requests that use versioning,
+// see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
//
-// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
-// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+// Directory buckets - MFA delete is not supported by directory buckets.
//
-// You can delete objects by explicitly calling DELETE Object or configure its
-// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html))
+// You can delete objects by explicitly calling DELETE Object or calling PutBucketLifecycle
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
// to enable Amazon S3 to remove them for you. If you want to block users or
// accounts from removing or deleting objects from your bucket, you must deny
// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration
// actions.
//
+// Directory buckets - S3 Lifecycle is not supported by directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObject request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always have
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must have the
+// s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
// The following action is related to DeleteObject:
//
// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
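//
// A sketch of permanently deleting one object version, assuming the session
// and svc client setup from the DeleteBucketPolicy sketch earlier; the bucket,
// key, and version ID below are hypothetical placeholders:
//
//	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
//		Bucket:    aws.String("examplebucket"),
//		Key:       aws.String("photos/2006/February/sample.jpg"),
//		VersionId: aws.String("3HL4kqCxf3vjVBH40Nrjfkd"), // hypothetical version ID
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	// If the deleted version was a delete marker, x-amz-delete-marker
//	// is reflected here.
//	log.Println("delete marker:", aws.BoolValue(out.DeleteMarker))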
@@ -2276,6 +2540,8 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
// DeleteObjectTagging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Removes the entire tag set from the specified object. For more information
// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
//
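// A short sketch, assuming the svc client from the DeleteBucketPolicy sketch
// earlier; bucket and key are hypothetical placeholders:
//
//	out, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
//		Bucket: aws.String("examplebucket"),
//		Key:    aws.String("sample.jpg"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	// VersionId identifies the object version whose tag set was removed.
//	log.Println("version:", aws.StringValue(out.VersionId))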
@@ -2367,36 +2633,82 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
// DeleteObjects API operation for Amazon Simple Storage Service.
//
-// This action enables you to delete multiple objects from a bucket using a
-// single HTTP request. If you know the object keys that you want to delete,
-// then this action provides a suitable alternative to sending individual delete
-// requests, reducing per-request overhead.
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. If you know the object keys that you want to delete,
+// then this operation provides a suitable alternative to sending individual
+// delete requests, reducing per-request overhead.
//
-// The request contains a list of up to 1000 keys that you want to delete. In
-// the XML, you provide the object key names, and optionally, version IDs if
-// you want to delete a specific version of the object from a versioning-enabled
-// bucket. For each key, Amazon S3 performs a delete action and returns the
-// result of that delete, success, or failure, in the response. Note that if
+// The request can contain a list of up to 1000 keys that you want to delete.
+// In the XML, you provide the object key names, and optionally, version IDs
+// if you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success or failure, in the response. Note that if
// the object specified in the request is not found, Amazon S3 returns the result
// as deleted.
//
-// The action supports two modes for the response: verbose and quiet. By default,
-// the action uses verbose mode in which the response includes the result of
-// deletion of each key in your request. In quiet mode the response includes
-// only keys where the delete action encountered an error. For a successful
-// deletion, the action does not return any information about the delete in
-// the response body.
+// - Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets.
+//
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// The operation supports two modes for the response: verbose and quiet. By
+// default, the operation uses verbose mode in which the response includes the
+// result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion in quiet mode, the operation does not return any information
+// about the delete in the response body.
//
// When performing this action on an MFA Delete enabled bucket that attempts
// to delete any versioned objects, you must include an MFA token. If you do
// not provide one, the entire request will fail, even if there are non-versioned
// objects you are trying to delete. If you provide an invalid token, whether
// there are versioned keys in the request or not, the entire Multi-Object Delete
-// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+// request will fail. For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
+// in the Amazon S3 User Guide.
+//
+// Directory buckets - MFA delete is not supported by directory buckets.
//
-// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
-// Amazon S3 uses the header value to ensure that your request body has not
-// been altered in transit.
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your DeleteObjects request includes specific headers.
+// s3:DeleteObject - To delete an object from a bucket, you must always specify
+// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific
+// version of an object from a versioning-enabled bucket, you must specify
+// the s3:DeleteObjectVersion permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Content-MD5 request header
+//
+// - General purpose bucket - The Content-MD5 request header is required
+// for all Multi-Object Delete requests. Amazon S3 uses the header value
+// to ensure that your request body has not been altered in transit.
+//
+// - Directory bucket - The Content-MD5 request header or an additional checksum
+// request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c,
+// x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object
+// Delete requests.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to DeleteObjects:
//
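// A quiet-mode batch delete sketch, assuming the svc client from the
// DeleteBucketPolicy sketch earlier; the bucket and keys are hypothetical
// (the SDK fills in the required Content-MD5/checksum header automatically):
//
//	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
//		Bucket: aws.String("examplebucket"),
//		Delete: &s3.Delete{
//			Objects: []*s3.ObjectIdentifier{
//				{Key: aws.String("a.txt")},
//				{Key: aws.String("b.txt")},
//			},
//			// Quiet mode: only keys whose delete failed are returned.
//			Quiet: aws.Bool(true),
//		},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, e := range out.Errors {
//		log.Printf("failed %s: %s", aws.StringValue(e.Key), aws.StringValue(e.Message))
//	}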
@@ -2482,6 +2794,8 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput)
// DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
// this operation, you must have the s3:PutBucketPublicAccessBlock permission.
// For more information about permissions, see Permissions Related to Bucket
@@ -2569,6 +2883,8 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the GET action uses the accelerate subresource to
// return the Transfer Acceleration state of a bucket, which is either Enabled
// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
@@ -2668,16 +2984,18 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
// GetBucketAcl API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the GET action uses the acl subresource to return
// the access control list (ACL) of a bucket. To use GET to return the ACL of
-// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission
-// is granted to the anonymous user, you can return the ACL of the bucket without
-// using an authorization header.
+// the bucket, you must have READ_ACP access to the bucket. If READ_ACP
+// permission is granted to the anonymous user, you can return the ACL of the
+// bucket without using an authorization header.
//
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// When you use this API operation with an access point, provide the alias of
+// the access point in place of the bucket name.
//
-// To use this API operation against an Object Lambda access point, provide
+// When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -2764,6 +3082,8 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the GET action returns an analytics configuration
// (identified by the analytics configuration ID) from the bucket.
//
@@ -2857,6 +3177,8 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
// GetBucketCors API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the Cross-Origin Resource Sharing (CORS) configuration information
// set for the bucket.
//
@@ -2864,10 +3186,10 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
// action. By default, the bucket owner has this permission and can grant it
// to others.
//
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// When you use this API operation with an access point, provide the alias of
+// the access point in place of the bucket name.
//
-// To use this API operation against an Object Lambda access point, provide
+// When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -2953,6 +3275,8 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r
// GetBucketEncryption API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the default encryption configuration for an Amazon S3 bucket. By
// default, all buckets have a default encryption configuration that uses server-side
// encryption with Amazon S3 managed keys (SSE-S3). For information about the
@@ -3043,6 +3367,8 @@ func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketInt
// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Gets the S3 Intelligent-Tiering configuration from the specified bucket.
//
// The S3 Intelligent-Tiering storage class is designed to optimize storage
@@ -3141,6 +3467,8 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon
// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns an inventory configuration (identified by the inventory configuration
// ID) from the bucket.
//
@@ -3242,6 +3570,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
// see the updated version of this topic. This topic is provided for backward
// compatibility.
//
+// This operation is not supported by directory buckets.
+//
// Returns the lifecycle configuration information set on the bucket. For information
// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
//
@@ -3340,13 +3670,18 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Bucket lifecycle configuration now supports specifying a lifecycle rule using
-// an object key name prefix, one or more object tags, or a combination of both.
+// an object key name prefix, one or more object tags, object size, or any combination
+// of these. The previous version of the API supported filtering based only
+// on an object key name prefix, which is supported for backward compatibility.
+// For the related API description,
+// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
// Accordingly, this section describes the latest API. The response describes
// the new filter element that you can use to specify a filter to select a subset
// of objects to which the rule applies. If you are using a previous version
-// of the lifecycle configuration, it still works. For the earlier action, see
-// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html).
+// of the lifecycle configuration, it still works.
//
// Returns the lifecycle configuration information set on the bucket. For information
// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
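//
// A retrieval sketch, assuming the svc client from the DeleteBucketPolicy
// sketch earlier; the bucket name is a hypothetical placeholder:
//
//	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
//		Bucket: aws.String("examplebucket"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, rule := range out.Rules {
//		log.Printf("rule %s: %s", aws.StringValue(rule.ID), aws.StringValue(rule.Status))
//	}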
@@ -3442,14 +3777,16 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
// GetBucketLocation API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the Region the bucket resides in. You set the bucket's Region using
// the LocationConstraint request parameter in a CreateBucket request. For more
// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
//
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// When you use this API operation with an access point, provide the alias of
+// the access point in place of the bucket name.
//
-// To use this API operation against an Object Lambda access point, provide
+// When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -3536,6 +3873,8 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
// GetBucketLogging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the logging status of a bucket and the permissions users have to
// view and modify that status.
//
@@ -3616,6 +3955,8 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu
// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Gets a metrics configuration (specified by the metrics configuration ID)
// from the bucket. Note that this doesn't include the daily storage metrics.
//
@@ -3714,6 +4055,8 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
// GetBucketNotification API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -3791,6 +4134,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the notification configuration of a bucket.
//
// If notifications are not enabled on the bucket, the action returns an empty
@@ -3801,10 +4146,10 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
// to other users to read this configuration with the s3:GetBucketNotification
// permission.
//
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// When you use this API operation with an access point, provide the alias of
+// the access point in place of the bucket name.
//
-// To use this API operation against an Object Lambda access point, provide
+// When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -3889,6 +4234,8 @@ func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControls
// GetBucketOwnershipControls API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation,
// you must have the s3:GetBucketOwnershipControls permission. For more information
// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html).
@@ -3973,10 +4320,21 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
// GetBucketPolicy API operation for Amazon Simple Storage Service.
//
-// Returns the policy of a specified bucket. If you are using an identity other
-// than the root user of the Amazon Web Services account that owns the bucket,
-// the calling identity must have the GetBucketPolicy permissions on the specified
-// bucket and belong to the bucket owner's account in order to use this operation.
+// Returns the policy of a specified bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// If you are using an identity other than the root user of the Amazon Web Services
+// account that owns the bucket, the calling identity must both have the GetBucketPolicy
+// permissions on the specified bucket and belong to the bucket owner's account
+// in order to use this operation.
//
// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access
// Denied error. If you have the correct permissions, but you're not using an
@@ -3991,17 +4349,33 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
// these API actions by VPC endpoint policies and Amazon Web Services Organizations
// policies.
//
-// To use this API operation against an access point, provide the alias of the
-// access point in place of the bucket name.
+// - General purpose bucket permissions - The s3:GetBucketPolicy permission
+// is required in a policy. For more information about general purpose bucket
+// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
//
-// To use this API operation against an Object Lambda access point, provide
-// the alias of the Object Lambda access point in place of the bucket name.
-// If the Object Lambda access point alias in a request is not valid, the error
-// code InvalidAccessPointAliasError is returned. For more information about
-// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
+// - Directory bucket permissions - To grant access to this API operation,
+// you must have the s3express:GetBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web
+// Services account that owns the resource. For more information about directory
+// bucket policies and permissions, see Amazon Web Services Identity and
+// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # Example bucket policies
+//
+// General purpose buckets example bucket policies - See Bucket policy examples
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
+// in the Amazon S3 User Guide.
+//
+// Directory bucket example bucket policies - See Example bucket policies for
+// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
//
-// For more information about bucket policies, see Using Bucket Policies and
-// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
//
// The following action is related to GetBucketPolicy:
//
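// A retrieval sketch, assuming the svc client from the DeleteBucketPolicy
// sketch earlier; the bucket name is a hypothetical placeholder:
//
//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
//		Bucket: aws.String("examplebucket"),
//	})
//	if err != nil {
//		log.Fatal(err) // e.g. 403 Access Denied without GetBucketPolicy permissions
//	}
//	log.Println(aws.StringValue(out.Policy)) // the policy document as JSON text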
@@ -4078,6 +4452,8 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re
// GetBucketPolicyStatus API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Retrieves the policy status for an Amazon S3 bucket, indicating whether the
// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus
// permission. For more information about Amazon S3 permissions, see Specifying
@@ -4167,6 +4543,8 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
// GetBucketReplication API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the replication configuration of a bucket.
//
// It can take a while to propagate the put or delete a replication configuration
@@ -4264,6 +4642,8 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput)
// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the request payment configuration of a bucket. To use this version
// of the operation, you must be the bucket owner. For more information, see
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html).
@@ -4343,6 +4723,8 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
// GetBucketTagging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the tag set associated with the bucket.
//
// To use this operation, you must have permission to perform the s3:GetBucketTagging
@@ -4431,6 +4813,8 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
// GetBucketVersioning API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the versioning state of a bucket.
//
// To retrieve the versioning state of a bucket, you must be the bucket owner.
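//
// A sketch, assuming the svc client from the DeleteBucketPolicy sketch
// earlier; the bucket name is a hypothetical placeholder:
//
//	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
//		Bucket: aws.String("examplebucket"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Status is empty if versioning has never been configured on the bucket.
//	log.Println("status:", aws.StringValue(out.Status))
//	log.Println("mfa delete:", aws.StringValue(out.MFADelete))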
@@ -4518,6 +4902,8 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
// GetBucketWebsite API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the website configuration for a bucket. To host website on Amazon
// S3, you can configure a bucket as website by adding a website configuration.
// For more information about hosting websites, see Hosting Websites on Amazon
@@ -4605,113 +4991,106 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
// GetObject API operation for Amazon Simple Storage Service.
//
-// Retrieves objects from Amazon S3. To use GET, you must have READ access to
-// the object. If you grant READ access to the anonymous user, you can return
-// the object without using an authorization header.
-//
-// An Amazon S3 bucket has no directory hierarchy such as you would find in
-// a typical computer file system. You can, however, create a logical hierarchy
-// by using object key names that imply a folder structure. For example, instead
-// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
-//
-// To get an object from such a logical hierarchy, specify the full key name
-// for the object in the GET operation. For a virtual hosted-style request example,
-// if you have the object photos/2006/February/sample.jpg, specify the resource
-// as /photos/2006/February/sample.jpg. For a path-style request example, if
-// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket,
-// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For
-// more information about request types, see HTTP Host Header Bucket Specification
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
-//
-// For more information about returning the ACL of an object, see GetObjectAcl
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html).
+// Retrieves an object from Amazon S3.
//
-// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
-// or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive
-// or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the
-// object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
-// Otherwise, this action returns an InvalidObjectState error. For information
-// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
-//
-// Encryption request headers, like x-amz-server-side-encryption, should not
-// be sent for GET requests if your object uses server-side encryption with
-// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption
-// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
-// Amazon S3 managed encryption keys (SSE-S3). If your object does use these
-// types of keys, you’ll get an HTTP 400 Bad Request error.
-//
-// If you encrypt an object by using server-side encryption with customer-provided
-// encryption keys (SSE-C) when you store the object in Amazon S3, then when
-// you GET the object, you must use the following headers:
+// In the GetObject request, specify the full key name for the object.
//
-// - x-amz-server-side-encryption-customer-algorithm
-//
-// - x-amz-server-side-encryption-customer-key
-//
-// - x-amz-server-side-encryption-customer-key-MD5
-//
-// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
-// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
-//
-// Assuming you have the relevant permission to read object tags, the response
-// also returns the x-amz-tagging-count header that provides the count of number
-// of tags associated with the object. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-// to retrieve the tag set associated with an object.
-//
-// # Permissions
-//
-// You need the relevant read object (or version) permission for this operation.
-// For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
-// If the object that you request doesn’t exist, the error that Amazon S3
-// returns depends on whether you also have the s3:ListBucket permission.
-//
-// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
-// an HTTP status code 404 (Not Found) error.
+// General purpose buckets - Both the virtual-hosted-style requests and the
+// path-style requests are supported. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg, specify the object
+// key name as /photos/2006/February/sample.jpg. For a path-style request example,
+// if you have the object photos/2006/February/sample.jpg in the bucket named
+// examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg.
+// For more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket)
+// in the Amazon S3 User Guide.
//
-// If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP
-// status code 403 ("access denied") error.
+// Directory buckets - Only virtual-hosted-style requests are supported. For
+// a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
+// in the bucket named examplebucket--use1-az5--x-s3, specify the object key
+// name as /photos/2006/February/sample.jpg. Also, when you make requests to
+// this API operation, your requests are sent to the Zonal endpoint. These endpoints
+// support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// # Versioning
+// Permissions
+//
+// - General purpose bucket permissions - You must have the required permissions
+// in a policy. To use GetObject, you must have READ access to the object
+// (or version). If you grant READ access to the anonymous user, the GetObject
+// operation returns the object without using an authorization header. For
+// more information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// in the Amazon S3 User Guide. If you include a versionId in your request,
+// you must have the s3:GetObjectVersion permission to access a specific
+// version of an object. The s3:GetObject permission is not required in this
+// scenario. If you request the current version of an object without a specific
+// versionId in the request, only the s3:GetObject permission is required.
+// The s3:GetObjectVersion permission is not required in this scenario. If
+// the object that you request doesn’t exist, the error that Amazon S3
+// returns depends on whether you also have the s3:ListBucket permission.
+// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
+// an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket
+// permission, Amazon S3 returns an HTTP status code 403 Access Denied error.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Storage classes
//
-// By default, the GET action returns the current version of an object. To return
-// a different version, use the versionId subresource.
+// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering
+// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,
+// before you can retrieve the object you must first restore a copy using RestoreObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this operation returns an InvalidObjectState error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide.
//
-// - If you supply a versionId, you need the s3:GetObjectVersion permission
-// to access a specific version of an object. If you request a specific version,
-// you do not need to have the s3:GetObject permission. If you request the
-// current version without a specific version ID, only s3:GetObject permission
-// is required. s3:GetObjectVersion permission won't be required.
+// Directory buckets - For directory buckets, only the S3 Express One Zone storage
+// class is supported to store newly created objects. Unsupported storage class
+// values won't write a destination object and will respond with the HTTP status
+// code 400 Bad Request.
//
-// - If the current version of the object is a delete marker, Amazon S3 behaves
-// as if the object was deleted and includes x-amz-delete-marker: true in
-// the response.
+// # Encryption
//
-// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GetObject requests if your object uses server-side encryption
+// with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with
+// Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption
+// with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in
+// your GetObject requests for the object that uses these types of keys, you’ll
+// get an HTTP 400 Bad Request error.
//
-// # Overriding Response Header Values
+// # Overriding response header values through the request
//
// There are times when you want to override certain response header values
-// in a GET response. For example, you might override the Content-Disposition
-// response header value in your GET request.
-//
-// You can override values for a set of response headers using the following
-// query parameters. These response header values are sent only on a successful
-// request, that is, when status code 200 OK is returned. The set of headers
-// you can override using these parameters is a subset of the headers that Amazon
-// S3 accepts when you create an object. The response headers that you can override
-// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
-// Content-Disposition, and Content-Encoding. To override these header values
-// in the GET response, you use the following request parameters.
-//
-// You must sign the request, either using an Authorization header or a presigned
-// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
-// request.
+// of a GetObject response. For example, you might override the Content-Disposition
+// response header value through your GetObject request.
//
-// - response-content-type
+// You can override values for a set of response headers. These modified response
+// header values are included only in a successful response, that is, when the
+// HTTP status code 200 OK is returned. The headers you can override using the
+// following query parameters in the request are a subset of the headers that
+// Amazon S3 accepts when you create an object.
//
-// - response-content-language
+// The response headers that you can override for the GetObject response are
+// Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type,
+// and Expires.
//
-// - response-expires
+// To override values for a set of response headers in the GetObject response,
+// you can use the following query parameters in the request.
//
// - response-cache-control
//
@@ -4719,17 +5098,19 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
//
// - response-content-encoding
//
-// # Overriding Response Header Values
+// - response-content-language
+//
+// - response-content-type
+//
+// - response-expires
//
-// If both of the If-Match and If-Unmodified-Since headers are present in the
-// request as follows: If-Match condition evaluates to true, and; If-Unmodified-Since
-// condition evaluates to false; then, S3 returns 200 OK and the data requested.
+// When you use these parameters, you must sign the request by using either
+// an Authorization header or a presigned URL. These parameters cannot be used
+// with an unsigned (anonymous) request.
//
-// If both of the If-None-Match and If-Modified-Since headers are present in
-// the request as follows:If-None-Match condition evaluates to false, and; If-Modified-Since
-// condition evaluates to true; then, S3 returns 304 Not Modified response code.
+// # HTTP Host header syntax
//
-// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to GetObject:
//
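// A sketch that overrides two response headers on a signed GetObject request,
// assuming the svc client from the DeleteBucketPolicy sketch earlier plus an
// "io" import; bucket and key are hypothetical placeholders:
//
//	out, err := svc.GetObject(&s3.GetObjectInput{
//		Bucket: aws.String("examplebucket"),
//		Key:    aws.String("photos/2006/February/sample.jpg"),
//		// Sent as response-content-type and response-content-disposition.
//		ResponseContentType:        aws.String("application/octet-stream"),
//		ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer out.Body.Close()
//	n, err := io.Copy(io.Discard, out.Body) // stream and discard the body
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("read %d bytes, Content-Type %s", n, aws.StringValue(out.ContentType))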
@@ -4752,6 +5133,15 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
// - ErrCodeInvalidObjectState "InvalidObjectState"
// Object is archived and inaccessible until restored.
//
+// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering
+// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,
+// before you can retrieve the object you must first restore a copy using RestoreObject
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+// Otherwise, this operation returns an InvalidObjectState error. For information
+// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+// in the Amazon S3 User Guide.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
req, out := c.GetObjectRequest(input)
@@ -4817,13 +5207,15 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
// GetObjectAcl API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the access control list (ACL) of an object. To use this operation,
// you must have s3:GetObjectAcl permissions or READ_ACP access to the object.
// For more information, see Mapping of ACL permissions and access policy permissions
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping)
// in the Amazon S3 User Guide
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// By default, GET returns ACL information about the current version of an object.
// To return ACL information about a different version, use the versionId subresource.
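//
// A sketch, assuming the svc client from the DeleteBucketPolicy sketch
// earlier; bucket and key are hypothetical placeholders:
//
//	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
//		Bucket: aws.String("examplebucket"),
//		Key:    aws.String("sample.jpg"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, g := range out.Grants {
//		log.Printf("%s -> %s", aws.StringValue(g.Grantee.Type), aws.StringValue(g.Permission))
//	}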
@@ -4921,16 +5313,65 @@ func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *r
// GetObjectAttributes API operation for Amazon Simple Storage Service.
//
// Retrieves all the metadata from an object without returning the object itself.
-// This action is useful if you're interested only in an object's metadata.
-// To use GetObjectAttributes, you must have READ access to the object.
+// This operation is useful if you're interested only in an object's metadata.
//
// GetObjectAttributes combines the functionality of HeadObject and ListParts.
// All of the data returned with each of those individual calls can be returned
// with a single call to GetObjectAttributes.
//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use GetObjectAttributes, you
+// must have READ access to the object. The permissions that you need to
+// use this operation depend on whether the bucket is versioned. If
+// the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes
+// permissions for this operation. If the bucket is not versioned, you need
+// the s3:GetObject and s3:GetObjectAttributes permissions. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// in the Amazon S3 User Guide. If the object that you request does not exist,
+// the error Amazon S3 returns depends on whether you also have the s3:ListBucket
+// permission. If you have the s3:ListBucket permission on the bucket, Amazon
+// S3 returns an HTTP status code 404 Not Found ("no such key") error. If
+// you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP
+// status code 403 Forbidden ("access denied") error.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Encryption
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for HEAD requests if your object uses server-side encryption with
+// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption
+// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
+// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
+// header is used when you PUT an object to S3 and want to specify the encryption
+// method. If you include this header in a GET request for an object that uses
+// these types of keys, you’ll get an HTTP 400 Bad Request error. This is because
+// the encryption method can't be changed when you retrieve the object.
+//
// If you encrypt an object by using server-side encryption with customer-provided
// encryption keys (SSE-C) when you store the object in Amazon S3, then when
-// you retrieve the metadata from the object, you must use the following headers:
+// you retrieve the metadata from the object, you must use the following headers
+// to provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
//
// - x-amz-server-side-encryption-customer-algorithm
//
@@ -4942,47 +5383,35 @@ func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *r
// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
//
-// - Encryption request headers, such as x-amz-server-side-encryption, should
-// not be sent for GET requests if your object uses server-side encryption
-// with Amazon Web Services KMS keys stored in Amazon Web Services Key Management
-// Service (SSE-KMS) or server-side encryption with Amazon S3 managed keys
-// (SSE-S3). If your object does use these types of keys, you'll get an HTTP
-// 400 Bad Request error.
+// Directory bucket permissions - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
//
-// - The last modified property in this case is the creation date of the
-// object.
+// # Versioning
+//
+// Directory buckets - S3 Versioning isn't enabled and supported for directory
+// buckets. For this API operation, only the null value of the version ID is
+// supported by directory buckets. You can only specify null for the versionId
+// query parameter in the request.
+//
+// # Conditional request headers
//
// Consider the following when using request headers:
//
// - If both of the If-Match and If-Unmodified-Since headers are present
// in the request as follows, then Amazon S3 returns the HTTP status code
// 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since
-// condition evaluates to false.
+// condition evaluates to false. For more information about conditional requests,
+// see RFC 7232 (https://tools.ietf.org/html/rfc7232).
//
// - If both of the If-None-Match and If-Modified-Since headers are present
// in the request as follows, then Amazon S3 returns the HTTP status code
// 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since
-// condition evaluates to true.
-//
-// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
-//
-// # Permissions
-//
-// The permissions that you need to use this operation depend on whether the
-// bucket is versioned. If the bucket is versioned, you need both the s3:GetObjectVersion
-// and s3:GetObjectVersionAttributes permissions for this operation. If the
-// bucket is not versioned, you need the s3:GetObject and s3:GetObjectAttributes
-// permissions. For more information, see Specifying Permissions in a Policy
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
-// in the Amazon S3 User Guide. If the object that you request does not exist,
-// the error Amazon S3 returns depends on whether you also have the s3:ListBucket
-// permission.
+// condition evaluates to true. For more information about conditional requests,
+// see RFC 7232 (https://tools.ietf.org/html/rfc7232).
//
-// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
-// an HTTP status code 404 Not Found ("no such key") error.
+// # HTTP Host header syntax
//
-// - If you don't have the s3:ListBucket permission, Amazon S3 returns an
-// HTTP status code 403 Forbidden ("access denied") error.
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following actions are related to GetObjectAttributes:
//
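// A sketch that requests a subset of attributes, assuming the svc client from
// the DeleteBucketPolicy sketch earlier; bucket and key are hypothetical:
//
//	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
//		Bucket: aws.String("examplebucket"),
//		Key:    aws.String("sample.jpg"),
//		// Only the attributes listed here are returned.
//		ObjectAttributes: aws.StringSlice([]string{
//			s3.ObjectAttributesEtag,
//			s3.ObjectAttributesObjectSize,
//			s3.ObjectAttributesStorageClass,
//		}),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("etag=%s size=%d", aws.StringValue(out.ETag), aws.Int64Value(out.ObjectSize))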
@@ -5078,10 +5507,12 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req
// GetObjectLegalHold API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Gets an object's current legal hold status. For more information, see Locking
// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// The following action is related to GetObjectLegalHold:
//
@@ -5158,6 +5589,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration
// GetObjectLockConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Gets the Object Lock configuration for a bucket. The rule specified in the
// Object Lock configuration will be applied by default to every new object
// placed in the specified bucket. For more information, see Locking Objects
@@ -5238,10 +5671,12 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req
// GetObjectRetention API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Retrieves an object's retention settings. For more information, see Locking
// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// The following action is related to GetObjectRetention:
//
@@ -5318,6 +5753,8 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request
// GetObjectTagging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns the tag-set of an object. You send the GET request against the tagging
// subresource associated with the object.
//
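// A sketch, assuming the svc client from the DeleteBucketPolicy sketch
// earlier; bucket and key are hypothetical placeholders:
//
//	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
//		Bucket: aws.String("examplebucket"),
//		Key:    aws.String("sample.jpg"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, t := range out.TagSet {
//		log.Printf("%s=%s", aws.StringValue(t.Key), aws.StringValue(t.Value))
//	}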
@@ -5413,6 +5850,8 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
// GetObjectTorrent API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns torrent files from a bucket. BitTorrent can save you bandwidth when
// you're distributing large files.
//
@@ -5422,7 +5861,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
//
// To use GET, you must have READ access to the object.
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// The following action is related to GetObjectTorrent:
//
@@ -5499,6 +5938,8 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req
// GetPublicAccessBlock API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To
// use this operation, you must have the s3:GetBucketPublicAccessBlock permission.
// For more information about Amazon S3 permissions, see Specifying Permissions
@@ -5590,39 +6031,63 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
output = &HeadBucketOutput{}
req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
return
}
// HeadBucket API operation for Amazon Simple Storage Service.
//
-// This action is useful to determine if a bucket exists and you have permission
-// to access it. The action returns a 200 OK if the bucket exists and you have
-// permission to access it.
+// You can use this operation to determine if a bucket exists and if you have
+// permission to access it. The action returns a 200 OK if the bucket exists
+// and you have permission to access it.
//
// If the bucket does not exist or you do not have permission to access it,
// the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404
// Not Found code. A message body is not included, so you cannot determine the
-// exception beyond these error codes.
+// exception beyond these HTTP response codes.
//
-// To use this operation, you must have permissions to perform the s3:ListBucket
-// action. The bucket owner has this permission by default and can grant this
-// permission to others. For more information about permissions, see Permissions
-// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+// Directory buckets - You must make requests for this API operation to the
+// Zonal endpoint. These endpoints support virtual-hosted-style requests in
+// the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style
+// requests are not supported. For more information, see Regional and Zonal
+// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// To use this API operation against an access point, you must provide the alias
-// of the access point in place of the bucket name or specify the access point
-// ARN. When using the access point ARN, you must direct requests to the access
-// point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
-// When using the Amazon Web Services SDKs, you provide the ARN in place of
-// the bucket name. For more information, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html).
+// # Authentication and authorization
//
-// To use this API operation against an Object Lambda access point, provide
-// the alias of the Object Lambda access point in place of the bucket name.
-// If the Object Lambda access point alias in a request is not valid, the error
-// code InvalidAccessPointAliasError is returned. For more information about
-// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
+// All HeadBucket requests must be authenticated and signed by using IAM credentials
+// (access key ID and secret access key for the IAM identities). All headers
+// with the x-amz- prefix, including x-amz-copy-source, must be signed. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+//
+// Directory bucket - You must use IAM credentials to authenticate and authorize
+// your access to the HeadBucket API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
+//
+// The Amazon Web Services CLI and SDKs handle authentication and authorization
+// on your behalf.
+//
+// Permissions
+//
+// - General purpose bucket permissions - To use this operation, you must
+// have permissions to perform the s3:ListBucket action. The bucket owner
+// has this permission by default and can grant this permission to others.
+// For more information about permissions, see Managing access permissions
+// to your Amazon S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have the s3express:CreateSession
+// permission in the Action element of a policy. By default, the session
+// is in the ReadWrite mode. If you want to restrict the access, you can
+// explicitly set the s3express:SessionMode condition key to ReadOnly on
+// the bucket. For more information about example bucket policies, see Example
+// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
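+// As a non-authoritative sketch, a bucket-existence probe with this SDK could
+// look like the following (the bucket name is a hypothetical placeholder):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.HeadBucket(&s3.HeadBucketInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical bucket name
+//    })
+//    // err == nil means the bucket exists and you can access it; otherwise
+//    // only the generic 400/403/404 status is available.
+//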
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5700,19 +6165,70 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
// HeadObject API operation for Amazon Simple Storage Service.
//
-// The HEAD action retrieves metadata from an object without returning the object
-// itself. This action is useful if you're only interested in an object's metadata.
-// To use HEAD, you must have READ access to the object.
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're interested only in an object's
+// metadata.
+//
+// A HEAD request has the same options as a GET operation on an object. The
+// response is identical to the GET response except that there is no response
+// body. Because of this, if the HEAD request generates an error, it returns
+// a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405
+// Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not
+// possible to retrieve the exact exception beyond these error codes.
+//
+// Request headers are limited to 8 KB in size. For more information, see Common
+// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// A HEAD request has the same options as a GET action on an object. The response
-// is identical to the GET response except that there is no response body. Because
-// of this, if the HEAD request generates an error, it returns a generic 400
-// Bad Request, 403 Forbidden or 404 Not Found code. It is not possible to retrieve
-// the exact exception beyond these error codes.
+// Permissions
+//
+// - General purpose bucket permissions - To use HEAD, you must have the
+// s3:GetObject permission. You need the relevant read object (or version)
+// permission for this operation. For more information, see Actions, resources,
+// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html)
+// in the Amazon S3 User Guide. If the object you request doesn't exist,
+// the error that Amazon S3 returns depends on whether you also have the
+// s3:ListBucket permission. If you have the s3:ListBucket permission on
+// the bucket, Amazon S3 returns an HTTP status code 404 Not Found error.
+// If you don’t have the s3:ListBucket permission, Amazon S3 returns an
+// HTTP status code 403 Forbidden error.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Encryption
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for HEAD requests if your object uses server-side encryption with
+// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption
+// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
+// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption
+// header is used when you PUT an object to S3 and want to specify the encryption
+// method. If you include this header in a HEAD request for an object that uses
+// these types of keys, you’ll get an HTTP 400 Bad Request error, because
+// the encryption method can't be changed when you retrieve the object.
//
// If you encrypt an object by using server-side encryption with customer-provided
// encryption keys (SSE-C) when you store the object in Amazon S3, then when
-// you retrieve the metadata from the object, you must use the following headers:
+// you retrieve the metadata from the object, you must use the following headers
+// to provide the encryption key for the server to be able to retrieve the object's
+// metadata. The headers are:
//
// - x-amz-server-side-encryption-customer-algorithm
//
@@ -5721,48 +6237,32 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
// - x-amz-server-side-encryption-customer-key-MD5
//
// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
-// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
-//
-// - Encryption request headers, like x-amz-server-side-encryption, should
-// not be sent for GET requests if your object uses server-side encryption
-// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side
-// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side
-// encryption with Amazon S3 managed encryption keys (SSE-S3). If your object
-// does use these types of keys, you’ll get an HTTP 400 Bad Request error.
-//
-// - The last modified property in this case is the creation date of the
-// object.
-//
-// Request headers are limited to 8 KB in size. For more information, see Common
-// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon S3 User Guide.
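+//
+// As a hedged illustration, a HeadObject call for an SSE-C object could pass
+// the key through the typed input fields (svc is an assumed *s3.S3 client;
+// rawKey is an assumed string holding the 256-bit key, which the v1 SDK
+// base64-encodes and uses to fill in the MD5 header):
+//
+//    out, err := svc.HeadObject(&s3.HeadObjectInput{
+//        Bucket:               aws.String("example-bucket"), // hypothetical
+//        Key:                  aws.String("example-object"), // hypothetical
+//        SSECustomerAlgorithm: aws.String("AES256"),
+//        SSECustomerKey:       aws.String(rawKey),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(out.ContentLength))
+//    }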
//
-// Consider the following when using request headers:
+// Directory bucket permissions - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
//
-// - Consideration 1 – If both of the If-Match and If-Unmodified-Since
-// headers are present in the request as follows: If-Match condition evaluates
-// to true, and; If-Unmodified-Since condition evaluates to false; Then Amazon
-// S3 returns 200 OK and the data requested.
+// Versioning
//
-// - Consideration 2 – If both of the If-None-Match and If-Modified-Since
-// headers are present in the request as follows: If-None-Match condition
-// evaluates to false, and; If-Modified-Since condition evaluates to true;
-// Then Amazon S3 returns the 304 Not Modified response code.
+// - If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in
+// the response.
//
-// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+// - If the specified version is a delete marker, the response returns a
+// 405 Method Not Allowed error and the Last-Modified: timestamp response
+// header.
//
-// # Permissions
+// - Directory buckets - Delete marker is not supported by directory buckets.
//
-// You need the relevant read object (or version) permission for this operation.
-// For more information, see Actions, resources, and condition keys for Amazon
-// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html).
-// If the object you request doesn't exist, the error that Amazon S3 returns
-// depends on whether you also have the s3:ListBucket permission.
+// - Directory buckets - S3 Versioning isn't enabled or supported for directory
+// buckets. For this API operation, only the null value of the version ID
+// is supported by directory buckets. You can only specify null to the versionId
+// query parameter in the request.
//
-// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
-// an HTTP status code 404 error.
+// # HTTP Host header syntax
//
-// - If you don’t have the s3:ListBucket permission, Amazon S3 returns
-// an HTTP status code 403 error.
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following actions are related to HeadObject:
//
@@ -5844,6 +6344,8 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics
// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Lists the analytics configurations for the bucket. You can have up to 1,000
// analytics configurations per bucket.
//
@@ -5943,6 +6445,8 @@ func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucket
// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Lists the S3 Intelligent-Tiering configuration from the specified bucket.
//
// The S3 Intelligent-Tiering storage class is designed to optimize storage
@@ -6041,6 +6545,8 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory
// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns a list of inventory configurations for the bucket. You can have up
// to 1,000 inventory configurations per bucket.
//
@@ -6140,6 +6646,8 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf
// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Lists the metrics configurations for the bucket. The metrics configurations
// are only for the request metrics of the bucket and do not provide information
// on daily storage metrics. You can have up to 1,000 configurations per bucket.
@@ -6240,6 +6748,8 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request,
// ListBuckets API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns a list of all buckets owned by the authenticated sender of the request.
// To use this operation, you must have the s3:ListAllMyBuckets permission.
//
@@ -6274,6 +6784,160 @@ func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, op
return out, req.Send()
}
+const opListDirectoryBuckets = "ListDirectoryBuckets"
+
+// ListDirectoryBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the ListDirectoryBuckets operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListDirectoryBuckets for more information on using the ListDirectoryBuckets
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListDirectoryBucketsRequest method.
+// req, resp := client.ListDirectoryBucketsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets
+func (c *S3) ListDirectoryBucketsRequest(input *ListDirectoryBucketsInput) (req *request.Request, output *ListDirectoryBucketsOutput) {
+ op := &request.Operation{
+ Name: opListDirectoryBuckets,
+ HTTPMethod: "GET",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ContinuationToken"},
+ OutputTokens: []string{"ContinuationToken"},
+ LimitToken: "MaxDirectoryBuckets",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListDirectoryBucketsInput{}
+ }
+
+ output = &ListDirectoryBucketsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListDirectoryBuckets API operation for Amazon Simple Storage Service.
+//
+// Returns a list of all Amazon S3 directory buckets owned by the authenticated
+// sender of the request. For more information about directory buckets, see
+// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// You must have the s3express:ListAllMyDirectoryBuckets permission in an IAM
+// identity-based policy instead of a bucket policy. Cross-account access to
+// this API operation isn't supported. This operation can only be performed
+// by the Amazon Web Services account that owns the resource. For more information
+// about directory bucket policies and permissions, see Amazon Web Services
+// Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
+//
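+// As a brief sketch beyond the request/paging examples above, a single
+// unpaginated call could look like this (svc is an assumed *s3.S3 client):
+//
+//    out, err := svc.ListDirectoryBuckets(&s3.ListDirectoryBucketsInput{})
+//    if err == nil {
+//        for _, b := range out.Buckets {
+//            fmt.Println(aws.StringValue(b.Name))
+//        }
+//    }
+//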
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListDirectoryBuckets for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets
+func (c *S3) ListDirectoryBuckets(input *ListDirectoryBucketsInput) (*ListDirectoryBucketsOutput, error) {
+ req, out := c.ListDirectoryBucketsRequest(input)
+ return out, req.Send()
+}
+
+// ListDirectoryBucketsWithContext is the same as ListDirectoryBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListDirectoryBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListDirectoryBucketsWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, opts ...request.Option) (*ListDirectoryBucketsOutput, error) {
+ req, out := c.ListDirectoryBucketsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListDirectoryBucketsPages iterates over the pages of a ListDirectoryBuckets operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListDirectoryBuckets method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListDirectoryBuckets operation.
+// pageNum := 0
+// err := client.ListDirectoryBucketsPages(params,
+// func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *S3) ListDirectoryBucketsPages(input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool) error {
+ return c.ListDirectoryBucketsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListDirectoryBucketsPagesWithContext same as ListDirectoryBucketsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListDirectoryBucketsPagesWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListDirectoryBucketsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListDirectoryBucketsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListDirectoryBucketsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
const opListMultipartUploads = "ListMultipartUploads"
// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
@@ -6323,28 +6987,79 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req
// ListMultipartUploads API operation for Amazon Simple Storage Service.
//
-// This action lists in-progress multipart uploads. An in-progress multipart
-// upload is a multipart upload that has been initiated using the Initiate Multipart
-// Upload request, but has not yet been completed or aborted.
+// This operation lists in-progress multipart uploads in a bucket. An in-progress
+// multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload
+// request, but has not yet been completed or aborted.
+//
+// Directory buckets - If multipart uploads in a directory bucket are in progress,
+// you can't delete the bucket until all the in-progress multipart uploads are
+// aborted or completed.
+//
+// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads
+// in the response. The limit of 1,000 multipart uploads is also the default
+// value. You can further limit the number of uploads in a response by specifying
+// the max-uploads request parameter. If there are more than 1,000 multipart
+// uploads that satisfy your ListMultipartUploads request, the response returns
+// an IsTruncated element with the value of true, a NextKeyMarker element, and
+// a NextUploadIdMarker element. To list the remaining multipart uploads, you
+// need to make subsequent ListMultipartUploads requests. In these requests,
+// include two query parameters: key-marker and upload-id-marker. Set the value
+// of key-marker to the NextKeyMarker value from the previous response. Similarly,
+// set the value of upload-id-marker to the NextUploadIdMarker value from the
+// previous response.
+//
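+// A minimal manual-pagination sketch following these rules (svc is an assumed
+// *s3.S3 client; the bucket name is hypothetical; upload processing elided):
+//
+//    in := &s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")}
+//    for {
+//        out, err := svc.ListMultipartUploads(in)
+//        if err != nil {
+//            break
+//        }
+//        // ... process out.Uploads ...
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        in.KeyMarker = out.NextKeyMarker
+//        in.UploadIdMarker = out.NextUploadIdMarker
+//    }
+//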
+// Directory buckets - The upload-id-marker element and the NextUploadIdMarker
+// element aren't supported by directory buckets. To list the additional multipart
+// uploads, you only need to set the value of key-marker to the NextKeyMarker
+// value from the previous response.
+//
+// For more information about multipart uploads, see Uploading Objects Using
+// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide.
//
-// This action returns at most 1,000 multipart uploads in the response. 1,000
-// multipart uploads is the maximum number of uploads a response can include,
-// which is also the default value. You can further limit the number of uploads
-// in a response by specifying the max-uploads parameter in the response. If
-// additional multipart uploads satisfy the list criteria, the response will
-// contain an IsTruncated element with the value true. To list the additional
-// multipart uploads, use the key-marker and upload-id-marker request parameters.
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// In the response, the uploads are sorted by key. If your application has initiated
-// more than one multipart upload using the same object key, then uploads in
-// the response are first sorted by key. Additionally, uploads are sorted in
-// ascending order within each key by the upload initiation time.
+// Permissions
//
-// For more information on multipart uploads, see Uploading Objects Using Multipart
-// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Sorting of multipart uploads in response
+//
+// - General purpose bucket - In the ListMultipartUploads response, the multipart
+// uploads are sorted based on two criteria: Key-based sorting - Multipart
+// uploads are initially sorted in ascending order based on their object
+// keys. Time-based sorting - For uploads that share the same object key,
+// they are further sorted in ascending order based on the upload initiation
+// time. Among uploads with the same key, the one that was initiated first
+// will appear before the ones that were initiated later.
//
-// For information on permissions required to use the multipart upload API,
-// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// - Directory bucket - In the ListMultipartUploads response, the multipart
+// uploads aren't sorted lexicographically based on the object keys.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to ListMultipartUploads:
//
@@ -6486,6 +7201,8 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
// ListObjectVersions API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns metadata about all versions of the objects in a bucket. You can also
// use request parameters as selection criteria to return metadata about a subset
// of all the object versions.
@@ -6498,8 +7215,6 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
//
// To use this operation, you must have READ access to the bucket.
//
-// This action is not supported by Amazon S3 on Outposts.
-//
// The following operations are related to ListObjectVersions:
//
// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html)
@@ -6638,6 +7353,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request,
// ListObjects API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Returns some or all (up to 1,000) of the objects in a bucket. You can use
// the request parameters as selection criteria to return a subset of the objects
// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure
@@ -6798,28 +7515,58 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
// You can use the request parameters as selection criteria to return a subset
// of the objects in a bucket. A 200 OK response can contain valid or invalid
// XML. Make sure to design your application to parse the contents of the response
-// and handle it appropriately. Objects are returned sorted in an ascending
-// order of the respective key names in the list. For more information about
-// listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html)
+// and handle it appropriately. For more information about listing objects,
+// see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html)
+// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html).
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
-// To use this operation, you must have READ access to the bucket.
+// Permissions
//
-// To use this action in an Identity and Access Management (IAM) policy, you
-// must have permission to perform the s3:ListBucket action. The bucket owner
-// has this permission by default and can grant this permission to others. For
-// more information about permissions, see Permissions Related to Bucket Subresource
-// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide.
+// - General purpose bucket permissions - To use this operation, you must
+// have READ access to the bucket. You must have permission to perform the
+// s3:ListBucket action. The bucket owner has this permission by default
+// and can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Sorting order of returned objects
+//
+// - General purpose bucket - For general purpose buckets, ListObjectsV2
+// returns objects in lexicographical order based on their key names.
+//
+// - Directory bucket - For directory buckets, ListObjectsV2 does not return
+// objects in lexicographical order.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
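+// As a short sketch (not part of the upstream documentation), listing keys
+// with the SDK's built-in paginator could look like this (svc is an assumed
+// *s3.S3 client; the bucket name is hypothetical):
+//
+//    err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
+//        Bucket: aws.String("example-bucket"),
+//    }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//        for _, obj := range page.Contents {
+//            fmt.Println(aws.StringValue(obj.Key))
+//        }
+//        return true // keep paging
+//    })
+//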
// This section describes the latest revision of this action. We recommend that
// you use this revised API operation for application development. For backward
// compatibility, Amazon S3 continues to support the prior version of this API
// operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html).
//
-// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html).
-//
// The following operations are related to ListObjectsV2:
//
// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
@@ -6962,24 +7709,58 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
// ListParts API operation for Amazon Simple Storage Service.
//
// Lists the parts that have been uploaded for a specific multipart upload.
-// This operation must include the upload ID, which you obtain by sending the
-// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)).
-// This request returns a maximum of 1,000 uploaded parts. The default number
-// of parts returned is 1,000 parts. You can restrict the number of parts returned
-// by specifying the max-parts request parameter. If your multipart upload consists
-// of more than 1,000 parts, the response returns an IsTruncated field with
-// the value of true, and a NextPartNumberMarker element. In subsequent ListParts
-// requests you can include the part-number-marker query string parameter and
-// set its value to the NextPartNumberMarker field value from the previous response.
-//
-// If the upload was created using a checksum algorithm, you will need to have
-// permission to the kms:Decrypt action for the request to succeed.
+//
+// To use this operation, you must provide the upload ID in the request. You
+// obtain this upload ID by sending the initiate multipart upload request through
+// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+//
+// The ListParts request returns a maximum of 1,000 uploaded parts. The limit
+// of 1,000 parts is also the default value. You can restrict the number of
+// parts in a response by specifying the max-parts request parameter. If your
+// multipart upload consists of more than 1,000 parts, the response returns
+// an IsTruncated field with the value of true, and a NextPartNumberMarker element.
+// To list remaining uploaded parts, in subsequent ListParts requests, include
+// the part-number-marker query string parameter and set its value to the NextPartNumberMarker
+// field value from the previous response.
//
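+// A short sketch using the SDK's paginator helper (svc is an assumed *s3.S3
+// client; bucket, key, and uploadID are hypothetical placeholders):
+//
+//    err := svc.ListPartsPages(&s3.ListPartsInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("example-object"),
+//        UploadId: aws.String(uploadID), // from CreateMultipartUpload
+//    }, func(page *s3.ListPartsOutput, lastPage bool) bool {
+//        fmt.Println(len(page.Parts), "parts on this page")
+//        return true
+//    })
+//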
// For more information on multipart uploads, see Uploading Objects Using Multipart
-// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// For information on permissions required to use the multipart upload API,
-// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+// Permissions
+//
+// - General purpose bucket permissions - For information about permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide. If the upload was created using server-side
+// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer
+// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you
+// must have permission to the kms:Decrypt action for the ListParts request
+// to succeed.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to ListParts:
//
@@ -7118,6 +7899,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
// Acceleration is a bucket-level feature that enables you to perform faster
// data transfers to Amazon S3.
@@ -7230,9 +8013,11 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
// PutBucketAcl API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the permissions on an existing bucket using access control lists (ACL).
// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
-// To set the ACL of a bucket, you must have WRITE_ACP permission.
+// To set the ACL of a bucket, you must have the WRITE_ACP permission.
//
// You can use one of the following two ways to set a bucket's permissions:
//
@@ -7398,6 +8183,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets an analytics configuration for the bucket (specified by the analytics
// configuration ID). You can have up to 1,000 analytics configurations per
// bucket.
@@ -7520,6 +8307,8 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
// PutBucketCors API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the cors configuration for your bucket. If the configuration exists,
// Amazon S3 replaces it.
//
@@ -7640,21 +8429,21 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r
// PutBucketEncryption API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This action uses the encryption subresource to configure default encryption
// and Amazon S3 Bucket Keys for an existing bucket.
//
// By default, all buckets have a default encryption configuration that uses
// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally
// configure default encryption for a bucket by using server-side encryption
-// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side
-// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption
-// with customer-provided keys (SSE-C). If you specify default encryption by
-// using SSE-KMS, you can also configure Amazon S3 Bucket Keys. For information
-// about bucket default encryption, see Amazon S3 bucket default encryption
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
-// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see
-// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
-// in the Amazon S3 User Guide.
+// with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side
+// encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default
+// encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html). If you
+// use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3
+// does not validate the KMS key ID provided in PutBucketEncryption requests.
//
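+// A hedged sketch of setting SSE-KMS default encryption (svc is an assumed
+// *s3.S3 client; kmsKeyID is a placeholder you must verify yourself, since
+// Amazon S3 does not validate it):
+//
+//    _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//        Bucket: aws.String("example-bucket"), // hypothetical
+//        ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//            Rules: []*s3.ServerSideEncryptionRule{{
+//                ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//                    SSEAlgorithm:   aws.String("aws:kms"),
+//                    KMSMasterKeyID: aws.String(kmsKeyID),
+//                },
+//            }},
+//        },
+//    })
+//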
// This action requires Amazon Web Services Signature Version 4. For more information,
// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
@@ -7744,6 +8533,8 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt
// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You
// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
//
@@ -7869,6 +8660,8 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This implementation of the PUT action adds an inventory configuration (identified
// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations
// per bucket.
@@ -8023,6 +8816,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
// PutBucketLifecycle API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html).
// This version has been deprecated. Existing lifecycle configurations will
// work. For new lifecycle configurations, use the updated API.
@@ -8152,6 +8947,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Creates a new lifecycle configuration for the bucket or replaces an existing
// lifecycle configuration. Keep in mind that this will overwrite an existing
// lifecycle configuration, so if you want to retain any configuration details,
@@ -8159,10 +8956,10 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
// about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html).
//
// Bucket lifecycle configuration now supports specifying a lifecycle rule using
-// an object key name prefix, one or more object tags, or a combination of both.
-// Accordingly, this section describes the latest API. The previous version
-// of the API supported filtering based only on an object key name prefix, which
-// is supported for backward compatibility. For the related API description,
+// an object key name prefix, one or more object tags, object size, or any combination
+// of these. Accordingly, this section describes the latest API. The previous
+// version of the API supported filtering based only on an object key name prefix,
+// which is supported for backward compatibility. For the related API description,
// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html).
//
// # Rules
@@ -8173,8 +8970,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
// adjustable. Each rule consists of the following:
//
// - A filter identifying a subset of objects to which the rule applies.
-// The filter can be based on a key name prefix, object tags, or a combination
-// of both.
+// The filter can be based on a key name prefix, object tags, object size,
+// or any combination of these.
//
// - A status indicating whether the rule is in effect.
//
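+// A sketch combining the filter and status elements above into a single rule
+// (svc is an assumed *s3.S3 client; bucket name, rule ID, and prefix are
+// hypothetical):
+//
+//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//            Rules: []*s3.LifecycleRule{{
+//                ID:         aws.String("expire-logs"),
+//                Status:     aws.String("Enabled"),
+//                Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//            }},
+//        },
+//    })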
@@ -8295,6 +9092,8 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
// PutBucketLogging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Set the logging parameters for a bucket and specify permissions for who
// can view and modify the logging parameters. All logs are saved to buckets
// in the same Amazon Web Services Region as the source bucket. To set the logging
@@ -8422,6 +9221,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets a metrics configuration (specified by the metrics configuration ID)
// for the bucket. You can have up to 1,000 metrics configurations per bucket.
// If you're updating an existing metrics configuration, note that this is a
@@ -8532,6 +9333,8 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
// PutBucketNotification API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html)
// operation.
//
@@ -8611,6 +9414,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Enables notifications of specified events for a bucket. For more information
// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
//
@@ -8740,6 +9545,8 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls
// PutBucketOwnershipControls API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this
// operation, you must have the s3:PutBucketOwnershipControls permission. For
// more information about Amazon S3 permissions, see Specifying permissions
@@ -8830,11 +9637,21 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
// PutBucketPolicy API operation for Amazon Simple Storage Service.
//
-// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using
-// an identity other than the root user of the Amazon Web Services account that
-// owns the bucket, the calling identity must have the PutBucketPolicy permissions
-// on the specified bucket and belong to the bucket owner's account in order
-// to use this operation.
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// If you are using an identity other than the root user of the Amazon Web Services
+// account that owns the bucket, the calling identity must both have the PutBucketPolicy
+// permissions on the specified bucket and belong to the bucket owner's account
+// in order to use this operation.
//
// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access
// Denied error. If you have the correct permissions, but you're not using an
@@ -8849,7 +9666,33 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
// these API actions by VPC endpoint policies and Amazon Web Services Organizations
// policies.
//
-// For more information, see Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html).
+// - General purpose bucket permissions - The s3:PutBucketPolicy permission
+// is required in a policy. For more information about general purpose bucket
+// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation,
+// you must have the s3express:PutBucketPolicy permission in an IAM identity-based
+// policy instead of a bucket policy. Cross-account access to this API operation
+// isn't supported. This operation can only be performed by the Amazon Web
+// Services account that owns the resource. For more information about directory
+// bucket policies and permissions, see Amazon Web Services Identity and
+// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
+// in the Amazon S3 User Guide.
+//
+// # Example bucket policies
+//
+// General purpose buckets example bucket policies - See Bucket policy examples
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html)
+// in the Amazon S3 User Guide.
+//
+// Directory bucket example bucket policies - See Example bucket policies for
+// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// in the Amazon S3 User Guide.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.
//
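+// A non-authoritative usage sketch (svc is an assumed *s3.S3 client; the
+// bucket name and the empty policy document are placeholders):
+//
+//    policy := `{"Version": "2012-10-17", "Statement": []}` // substitute real statements
+//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("example-bucket"),
+//        Policy: aws.String(policy),
+//    })
+//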
// The following operations are related to PutBucketPolicy:
//
@@ -8933,6 +9776,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
// PutBucketReplication API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Creates a replication configuration or replaces an existing one. For more
// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
// in the Amazon S3 User Guide.
@@ -8941,6 +9786,9 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
// configuration, you provide the name of the destination bucket or buckets
// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3
// can assume to replicate objects on your behalf, and other relevant information.
+// You can invoke this request for a specific Amazon Web Services Region by
+// using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion)
+// condition key.
//
// A replication configuration must include at least one rule, and can contain
// a maximum of 1,000. Each rule identifies a subset of objects to replicate
@@ -9069,6 +9917,8 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the request payment configuration for a bucket. By default, the bucket
// owner pays for downloads from the bucket. This configuration parameter enables
// the bucket owner (only) to specify that the person requesting the download
@@ -9157,6 +10007,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// PutBucketTagging API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the tags for a bucket.
//
// Use tags to organize your Amazon Web Services bill to reflect your own cost
@@ -9167,7 +10019,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// name, and then organize your billing information to see the total cost of
// that application across several services. For more information, see Cost
// Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
-// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html).
+// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
//
// When this operation sets the tags for a bucket, it will overwrite any current
// tags the bucket already has. You cannot use this operation to add tags to
@@ -9179,22 +10031,20 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
//
-// PutBucketTagging has the following special errors:
+// PutBucketTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
//
-// - Error code: InvalidTagError Description: The tag provided was not a
-// valid tag. This error can occur if the tag did not pass input validation.
-// For information about tag restrictions, see User-Defined Tag Restrictions
-// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
-// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html).
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Using
+// Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
//
-// - Error code: MalformedXMLError Description: The XML provided does not
-// match the schema.
+// - MalformedXML - The XML provided does not match the schema.
//
-// - Error code: OperationAbortedError Description: A conflicting conditional
-// action is currently in progress against this resource. Please try again.
+// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource. Please try again.
//
-// - Error code: InternalError Description: The service was unable to apply
-// the provided tag to the bucket.
+// - InternalError - The service was unable to apply the provided tag to
+// the bucket.
//
// The following operations are related to PutBucketTagging:
//
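Because PutBucketTagging overwrites the existing tag-set, as noted above, callers typically send every tag in a single request. A minimal sketch, assuming placeholder region, bucket name, and tag values:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// PutBucketTagging overwrites the bucket's current tag-set, so every tag
	// the bucket should keep must be included here.
	if _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder name
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("stash")},
				{Key: aws.String("cost-center"), Value: aws.String("backup")},
			},
		},
	}); err != nil {
		log.Fatalf("PutBucketTagging failed: %v", err)
	}
}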
@@ -9278,6 +10128,8 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
// PutBucketVersioning API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the versioning state of an existing bucket.
//
// You can set the versioning state with one of the following values:
@@ -9389,6 +10241,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
// PutBucketWebsite API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Sets the configuration of the website that is specified in the website subresource.
// To configure a bucket as a website, you can add this subresource on the bucket
// with website configuration information such as the file name of the index
@@ -9456,6 +10310,8 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
// in the Amazon S3 User Guide.
//
+// The maximum request length is limited to 128 KB.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -9527,87 +10383,83 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
// PutObject API operation for Amazon Simple Storage Service.
//
-// Adds an object to a bucket. You must have WRITE permissions on a bucket to
-// add an object to it.
-//
-// Amazon S3 never adds partial objects; if you receive a success response,
-// Amazon S3 added the entire object to the bucket. You cannot use PutObject
-// to only update a single piece of metadata for an existing object. You must
-// put the entire object with updated metadata if you want to update some values.
-//
-// Amazon S3 is a distributed system. If it receives multiple write requests
-// for the same object simultaneously, it overwrites all but the last object
-// written. To prevent objects from being deleted or overwritten, you can use
-// Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
-//
-// To ensure that data is not corrupted traversing the network, use the Content-MD5
-// header. When you use this header, Amazon S3 checks the object against the
-// provided MD5 value and, if they do not match, returns an error. Additionally,
-// you can calculate the MD5 while putting an object to Amazon S3 and compare
-// the returned ETag to the calculated MD5 value.
-//
-// - To successfully complete the PutObject request, you must have the s3:PutObject
-// in your IAM permissions.
+// Adds an object to a bucket.
//
-// - To successfully change the objects acl of your PutObject request, you
-// must have the s3:PutObjectAcl in your IAM permissions.
+// - Amazon S3 never adds partial objects; if you receive a success response,
+// Amazon S3 added the entire object to the bucket. You cannot use PutObject
+// to only update a single piece of metadata for an existing object. You
+// must put the entire object with updated metadata if you want to update
+// some values.
//
-// - To successfully set the tag-set with your PutObject request, you must
-// have the s3:PutObjectTagging in your IAM permissions.
+// - If your bucket uses the bucket owner enforced setting for Object Ownership,
+// ACLs are disabled and no longer affect permissions. All objects written
+// to the bucket by any account will be owned by the bucket owner.
//
-// - The Content-MD5 header is required for any request to upload an object
-// with a retention period configured using Amazon S3 Object Lock. For more
-// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+// - Directory buckets - For directory buckets, you must make requests for
+// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
-// You have four mutually exclusive options to protect data using server-side
-// encryption in Amazon S3, depending on how you choose to manage the encryption
-// keys. Specifically, the encryption key options are Amazon S3 managed keys
-// (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided
-// keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using
-// Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon
-// S3 to encrypt data at rest by using server-side encryption with other key
-// options. For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html).
-//
-// When adding a new object, you can use headers to grant ACL-based permissions
-// to individual Amazon Web Services accounts or to predefined groups defined
-// by Amazon S3. These permissions are then added to the ACL on the object.
-// By default, all objects are private. Only the owner has full access control.
-// For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
-//
-// If the bucket that you're uploading objects to uses the bucket owner enforced
-// setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
-// Buckets that use this setting only accept PUT requests that don't specify
-// an ACL or PUT requests that specify bucket owner full control ACLs, such
-// as the bucket-owner-full-control canned ACL or an equivalent form of this
-// ACL expressed in the XML format. PUT requests that contain other ACLs (for
-// example, custom grants to certain Amazon Web Services accounts) fail and
-// return a 400 error with the error code AccessControlListNotSupported. For
-// more information, see Controlling ownership of objects and disabling ACLs
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide.
-//
-// If your bucket uses the bucket owner enforced setting for Object Ownership,
-// all objects written to the bucket by any account will be owned by the bucket
-// owner.
-//
-// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
-// objects. The STANDARD storage class provides high durability and high availability.
-// Depending on performance needs, you can specify a different Storage Class.
-// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
-// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
-// in the Amazon S3 User Guide.
-//
-// If you enable versioning for a bucket, Amazon S3 automatically generates
-// a unique version ID for the object being stored. Amazon S3 returns this ID
-// in the response. When you enable versioning for a bucket, if Amazon S3 receives
-// multiple write requests for the same object simultaneously, it stores all
-// of the objects. For more information about versioning, see Adding Objects
-// to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html).
-// For information about returning the versioning state of a bucket, see GetBucketVersioning
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+// Amazon S3 is a distributed system. If it receives multiple write requests
+// for the same object simultaneously, it overwrites all but the last object
+// written. However, Amazon S3 provides features that can modify this behavior:
+//
+// - S3 Object Lock - To prevent objects from being deleted or overwritten,
+// you can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
+// in the Amazon S3 User Guide. This functionality is not supported for directory
+// buckets.
+//
+// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3
+// receives multiple write requests for the same object simultaneously, it
+// stores all versions of the objects. For each write request that is made
+// to the same object, Amazon S3 automatically generates a unique version
+// ID for the object being stored in Amazon S3. You can retrieve, replace,
+// or delete any version of the object. For more information about versioning,
+// see Adding Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html)
+// in the Amazon S3 User Guide. For information about returning the versioning
+// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+// This functionality is not supported for directory buckets.
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your PutObject request includes specific headers.
+// s3:PutObject - To successfully complete the PutObject request, you must
+// always have the s3:PutObject permission on a bucket to add an object to
+// it. s3:PutObjectAcl - To successfully change the object's ACL with your
+// PutObject request, you must have the s3:PutObjectAcl permission. s3:PutObjectTagging
+// - To successfully set the tag-set with your PutObject request, you must
+// have the s3:PutObjectTagging permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Data integrity with Content-MD5
+//
+// - General purpose bucket - To ensure that data is not corrupted traversing
+// the network, use the Content-MD5 header. When you use this header, Amazon
+// S3 checks the object against the provided MD5 value and, if they do not
+// match, Amazon S3 returns an error. Alternatively, when the object's ETag
+// is its MD5 digest, you can calculate the MD5 while putting the object
+// to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+//
+// - Directory bucket - This functionality is not supported for directory
+// buckets.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// For more information about related Amazon S3 APIs, see the following:
//
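The Content-MD5 guidance above is straightforward to apply with this SDK: compute the digest locally, base64-encode it, and pass it as ContentMD5 so S3 can reject a corrupted payload. A minimal sketch, with placeholder region, bucket, and key names:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	body := []byte("hello, world")

	// Compute the MD5 digest locally; S3 rejects the upload if the payload
	// it receives does not match this value.
	sum := md5.Sum(body)
	contentMD5 := base64.StdEncoding.EncodeToString(sum[:])

	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:     aws.String("example-bucket"), // placeholder names
		Key:        aws.String("example-key"),
		Body:       bytes.NewReader(body),
		ContentMD5: aws.String(contentMD5),
	})
	if err != nil {
		log.Fatalf("PutObject failed: %v", err)
	}
	log.Printf("stored object, ETag=%s", aws.StringValue(out.ETag))
}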
@@ -9690,13 +10542,15 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
// PutObjectAcl API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Uses the acl subresource to set the access control list (ACL) permissions
-// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission
-// to set the ACL of an object. For more information, see What permissions can
-// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
+// for a new or existing object in an S3 bucket. You must have the WRITE_ACP
+// permission to set the ACL of an object. For more information, see What permissions
+// can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
// in the Amazon S3 User Guide.
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// Depending on your application needs, you can choose to set the ACL on an
// object using either the request body or the headers. For example, if you
@@ -9865,10 +10719,12 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req
// PutObjectLegalHold API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Applies a legal hold configuration to the specified object. For more information,
// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -9945,6 +10801,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration
// PutObjectLockConfiguration API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Places an Object Lock configuration on the specified bucket. The rule specified
// in the Object Lock configuration will be applied by default to every new
// object placed in the specified bucket. For more information, see Locking
@@ -9955,8 +10813,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration
// - The DefaultRetention period can be either Days or Years but you must
// select one. You cannot specify Days and Years at the same time.
//
-// - You can only enable Object Lock for new buckets. If you want to turn
-// on Object Lock for an existing bucket, contact Amazon Web Services Support.
+// - You can enable Object Lock for new or existing buckets. For more information,
+// see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -10033,13 +10891,15 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req
// PutObjectRetention API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Places an Object Retention configuration on an object. For more information,
// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
// Users or accounts require the s3:PutObjectRetention permission in order to
// place an Object Retention configuration on objects. Bypassing a Governance
// Retention configuration requires the s3:BypassGovernanceRetention permission.
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -10116,12 +10976,15 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
// PutObjectTagging API operation for Amazon Simple Storage Service.
//
-// Sets the supplied tag-set to an object that already exists in a bucket.
+// This operation is not supported by directory buckets.
//
-// A tag is a key-value pair. You can associate tags with an object by sending
-// a PUT request against the tagging subresource that is associated with the
-// object. You can retrieve tags by sending a GET request. For more information,
-// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
+// Sets the supplied tag-set to an object that already exists in a bucket. A
+// tag is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html).
+//
+// You can associate tags with an object by sending a PUT request against the
+// tagging subresource that is associated with the object. You can retrieve
+// tags by sending a GET request. For more information, see GetObjectTagging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html).
//
// For tagging-related restrictions related to characters and encodings, see
// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html).
@@ -10134,22 +10997,20 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
// To put tags of any other version, use the versionId query parameter. You
// also need permission for the s3:PutObjectVersionTagging action.
//
-// For information about the Amazon S3 object tagging feature, see Object Tagging
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
-//
-// PutObjectTagging has the following special errors:
+// PutObjectTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
//
-// - Code: InvalidTagError Cause: The tag provided was not a valid tag. This
-// error can occur if the tag did not pass input validation. For more information,
-// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Object
+// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html).
//
-// - Code: MalformedXMLError Cause: The XML provided does not match the schema.
+// - MalformedXML - The XML provided does not match the schema.
//
-// - Code: OperationAbortedError Cause: A conflicting conditional action
-// is currently in progress against this resource. Please try again.
+// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource. Please try again.
//
-// - Code: InternalError Cause: The service was unable to apply the provided
-// tag to the object.
+// - InternalError - The service was unable to apply the provided tag to
+// the object.
//
// The following operations are related to PutObjectTagging:
//
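As with the bucket-level call, PutObjectTagging replaces the object's whole tag-set. A minimal sketch, assuming placeholder region, bucket, and key names; set VersionId to tag a non-current version as described above:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Replaces the full tag-set on the current version of the object; set
	// VersionId in the input to tag a specific version instead.
	if _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder names
		Key:    aws.String("example-key"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("classification"), Value: aws.String("archive")},
			},
		},
	}); err != nil {
		log.Fatalf("PutObjectTagging failed: %v", err)
	}
}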
@@ -10233,6 +11094,8 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req
// PutPublicAccessBlock API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Creates or modifies the PublicAccessBlock configuration for an Amazon S3
// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
// permission. For more information about Amazon S3 permissions, see Specifying
@@ -10329,14 +11192,14 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
// RestoreObject API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// # Restores an archived copy of an object back into Amazon S3
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// This action performs the following types of requests:
//
-// - select - Perform a select query on an archived object
-//
// - restore an archive - Restore an archived object
//
// For more information about the S3 structure in the request body, see the
@@ -10350,44 +11213,6 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
// in the Amazon S3 User Guide
//
-// Define the SQL expression for the SELECT type of restoration for your query
-// in the request body's SelectParameters structure. You can use expressions
-// like the following examples.
-//
-// - The following expression returns all records from the specified object.
-// SELECT * FROM Object
-//
-// - Assuming that you are not using any headers for data stored in the object,
-// you can specify columns with positional headers. SELECT s._1, s._2 FROM
-// Object s WHERE s._3 > 100
-//
-// - If you have headers and you set the fileHeaderInfo in the CSV structure
-// in the request body to USE, you can specify headers in the query. (If
-// you set the fileHeaderInfo field to IGNORE, the first row is skipped for
-// the query.) You cannot mix ordinal positions with header column names.
-// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
-//
-// When making a select request, you can also do the following:
-//
-// - To expedite your queries, specify the Expedited tier. For more information
-// about tiers, see "Restoring Archives," later in this topic.
-//
-// - Specify details about the data serialization format of both the input
-// object that is being queried and the serialization of the CSV-encoded
-// query results.
-//
-// The following are additional important facts about the select feature:
-//
-// - The output results are new Amazon S3 objects. Unlike archive retrievals,
-// they are stored until explicitly deleted-manually or through a lifecycle
-// configuration.
-//
-// - You can issue more than one select request on the same Amazon S3 object.
-// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.
-//
-// - Amazon S3 accepts a select request even if the object has already been
-// restored. A select request doesn’t return error response 409.
-//
// # Permissions
//
// To use this operation, you must have permissions to perform the s3:RestoreObject
@@ -10491,8 +11316,8 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
// response.
//
// - Special errors: Code: RestoreAlreadyInProgress Cause: Object restore
-// is already in progress. (This error does not apply to SELECT type requests.)
-// HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client
+// is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code
+// Prefix: Client
//
// - Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals
// are currently not available. Try again later. (Returned if there is insufficient
@@ -10591,6 +11416,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
// SelectObjectContent API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// This action filters the contents of an Amazon S3 object based on a simple
// structured query language (SQL) statement. In the request, along with the
// SQL expression, you must also specify a data serialization format (JSON,
@@ -10599,7 +11426,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
// SQL expression. You must also specify the data serialization format for the
// response.
//
-// This action is not supported by Amazon S3 on Outposts.
+// This functionality is not supported for Amazon S3 on Outposts.
//
// For more information about Amazon S3 Select, see Selecting Content from Objects
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html)
@@ -10608,7 +11435,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r
//
// # Permissions
//
-// You must have s3:GetObject permission for this operation. Amazon S3 Select
+// You must have the s3:GetObject permission for this operation. Amazon S3 Select
// does not support anonymous access. For more information about permissions,
// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
// in the Amazon S3 User Guide.
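A minimal sketch of a SelectObjectContent call against a CSV object with a header row, using the event-stream API this package generates; the region, bucket, key, and query are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholder names
		Key:            aws.String("data.csv"),
		Expression:     aws.String("SELECT s.name FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	})
	if err != nil {
		log.Fatalf("SelectObjectContent failed: %v", err)
	}
	defer out.EventStream.Close()

	// Records events carry the query results as raw bytes.
	for event := range out.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatalf("event stream error: %v", err)
	}
}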
@@ -10921,15 +11748,15 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
//
// Uploads a part in a multipart upload.
//
-// In this operation, you provide part data in your request. However, you have
-// an option to specify your existing Amazon S3 object as a data source for
-// the part you are uploading. To upload a part from an existing object, you
-// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// In this operation, you provide new data as a part of an object in your request.
+// However, you have an option to specify your existing Amazon S3 object as
+// a data source for the part you are uploading. To upload a part from an existing
+// object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
// operation.
//
// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
// before you can upload any part. In response to your initiate request, Amazon
-// S3 returns an upload ID, a unique identifier, that you must include in your
+// S3 returns an upload ID, a unique identifier that you must include in your
// upload part request.
//
// Part numbers can be any number from 1 to 10,000, inclusive. A part number
@@ -10941,18 +11768,8 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
// in the Amazon S3 User Guide.
//
-// To ensure that data is not corrupted when traversing the network, specify
-// the Content-MD5 header in the upload part request. Amazon S3 checks the part
-// data against the provided MD5 value. If they do not match, Amazon S3 returns
-// an error.
-//
-// If the upload request is signed with Signature Version 4, then Amazon Web
-// Services S3 uses the x-amz-content-sha256 header as a checksum instead of
-// Content-MD5. For more information see Authenticating Requests: Using the
-// Authorization Header (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
-//
-// Note: After you initiate multipart upload and upload one or more parts, you
-// must either complete or abort multipart upload in order to stop getting charged
+// After you initiate multipart upload and upload one or more parts, you must
+// either complete or abort multipart upload in order to stop getting charged
// for storage of the uploaded parts. Only after you either complete or abort
// multipart upload, Amazon S3 frees up the parts storage and stops charging
// you for the parts storage.
@@ -10961,50 +11778,88 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
// Amazon S3 User Guide .
//
-// For information on the permissions required to use the multipart upload API,
-// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
-//
-// Server-side encryption is for data encryption at rest. Amazon S3 encrypts
-// your data as it writes it to disks in its data centers and decrypts it when
-// you access it. You have three mutually exclusive options to protect data
-// using server-side encryption in Amazon S3, depending on how you choose to
-// manage the encryption keys. Specifically, the encryption key options are
-// Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS),
-// and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side
-// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally
-// tell Amazon S3 to encrypt data at rest using server-side encryption with
-// other key options. The option you use depends on whether you want to use
-// KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose
-// to provide your own encryption key, the request headers you provide in the
-// request must match the headers you used in the request to initiate the upload
-// by using CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
-// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
-// Server-side encryption is supported by the S3 Multipart Upload actions. Unless
-// you are using a customer-provided encryption key (SSE-C), you don't need
-// to specify the encryption parameters in each UploadPart request. Instead,
-// you only need to specify the server-side encryption parameters in the initial
-// Initiate Multipart request. For more information, see CreateMultipartUpload
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// Permissions
//
-// If you requested server-side encryption using a customer-provided encryption
-// key (SSE-C) in your initiate multipart upload request, you must provide identical
-// encryption information in each part upload using the following headers.
+// - General purpose bucket permissions - For information on the permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
//
-// - x-amz-server-side-encryption-customer-algorithm
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Data integrity
+//
+// General purpose bucket - To ensure that data is not corrupted traversing
+// the network, specify the Content-MD5 header in the upload part request. Amazon
+// S3 checks the part data against the provided MD5 value. If they do not match,
+// Amazon S3 returns an error. If the upload request is signed with Signature
+// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header
+// as a checksum instead of Content-MD5. For more information see Authenticating
+// Requests: Using the Authorization Header (Amazon Web Services Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Directory buckets - MD5 is not supported by directory buckets. You can use
+// checksum algorithms to check object integrity.
+//
+// Encryption
+//
+// - General purpose bucket - Server-side encryption is for data encryption
+// at rest. Amazon S3 encrypts your data as it writes it to disks in its
+// data centers and decrypts it when you access it. You have mutually exclusive
+// options to protect data using server-side encryption in Amazon S3, depending
+// on how you choose to manage the encryption keys. Specifically, the encryption
+// key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS
+// keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts
+// data with server-side encryption using Amazon S3 managed keys (SSE-S3)
+// by default. You can optionally tell Amazon S3 to encrypt data at rest
+// using server-side encryption with other key options. The option you use
+// depends on whether you want to use KMS keys (SSE-KMS) or provide your
+// own encryption key (SSE-C). Server-side encryption is supported by the
+// S3 Multipart Upload operations. Unless you are using a customer-provided
+// encryption key (SSE-C), you don't need to specify the encryption parameters
+// in each UploadPart request. Instead, you only need to specify the server-side
+// encryption parameters in the initial Initiate Multipart request. For more
+// information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html).
+// If you request server-side encryption using a customer-provided encryption
+// key (SSE-C) in your initiate multipart upload request, you must provide
+// identical encryption information in each part upload using the following
+// request headers: x-amz-server-side-encryption-customer-algorithm, x-amz-server-side-encryption-customer-key,
+// and x-amz-server-side-encryption-customer-key-MD5.
+//
+// - Directory bucket - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+//
+// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+// in the Amazon S3 User Guide.
//
-// - x-amz-server-side-encryption-customer-key
+// Special errors
//
-// - x-amz-server-side-encryption-customer-key-MD5
+// - Error Code: NoSuchUpload Description: The specified multipart upload
+// does not exist. The upload ID might be invalid, or the multipart upload
+// might have been aborted or completed. HTTP Status Code: 404 Not Found
+// SOAP Fault Code Prefix: Client
//
-// UploadPart has the following special errors:
+// # HTTP Host header syntax
//
-// - Code: NoSuchUpload Cause: The specified multipart upload does not exist.
-// The upload ID might be invalid, or the multipart upload might have been
-// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code
-// Prefix: Client
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to UploadPart:
//
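The part-level Content-MD5 check described above works the same way as for PutObject: hash the part body and send the base64 digest. A minimal sketch, assuming a placeholder upload ID from an earlier CreateMultipartUpload call and placeholder bucket and key names:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// uploadPart uploads one part of a previously initiated multipart upload and
// returns the ETag that CompleteMultipartUpload needs later.
func uploadPart(svc *s3.S3, bucket, key, uploadID string, partNumber int64, data []byte) (string, error) {
	sum := md5.Sum(data)
	out, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		UploadId:   aws.String(uploadID),
		PartNumber: aws.Int64(partNumber),
		Body:       bytes.NewReader(data),
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.ETag), nil
}

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// "example-upload-id" stands in for the ID returned by CreateMultipartUpload.
	etag, err := uploadPart(svc, "example-bucket", "example-key", "example-upload-id", 1, []byte("part one payload"))
	if err != nil {
		log.Fatalf("UploadPart failed: %v", err)
	}
	log.Printf("part 1 ETag: %s", etag)
}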
@@ -11089,81 +11944,105 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
// UploadPartCopy API operation for Amazon Simple Storage Service.
//
-// Uploads a part by copying data from an existing object as data source. You
-// specify the data source by adding the request header x-amz-copy-source in
-// your request and a byte range by adding the request header x-amz-copy-source-range
+// Uploads a part by copying data from an existing object as the data source. To
+// specify the data source, you add the request header x-amz-copy-source in
+// your request. To specify a byte range, you add the request header x-amz-copy-source-range
// in your request.
//
// For information about maximum and minimum part sizes and other multipart
// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
// in the Amazon S3 User Guide.
//
-// Instead of using an existing object as part data, you might use the UploadPart
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action
-// and provide data in your request.
+// Instead of copying data from an existing object as part data, you might use
+// the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
+// action to upload new data as a part of an object in your request.
//
// You must initiate a multipart upload before you can upload any part. In response
-// to your initiate request. Amazon S3 returns a unique identifier, the upload
-// ID, that you must include in your upload part request.
+// to your initiate request, Amazon S3 returns the upload ID, a unique identifier
+// that you must include in your upload part request.
+//
+// For conceptual information about multipart uploads, see Uploading Objects
+// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
+// in the Amazon S3 User Guide. For information about copying objects using
+// a single atomic action vs. a multipart upload, see Operations on Objects
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in
+// the Amazon S3 User Guide.
//
-// For more information about using the UploadPartCopy operation, see the following:
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
//
-// - For conceptual information about multipart uploads, see Uploading Objects
-// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
-// in the Amazon S3 User Guide.
+// # Authentication and authorization
//
-// - For information about permissions required to use the multipart upload
-// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
+// All UploadPartCopy requests must be authenticated and signed by using IAM
+// credentials (access key ID and secret access key for the IAM identities).
+// All headers with the x-amz- prefix, including x-amz-copy-source, must be
+// signed. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
-// - For information about copying objects using a single atomic action vs.
-// a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html)
-// in the Amazon S3 User Guide.
+// Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the UploadPartCopy API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
//
-// - For information about using server-side encryption with customer-provided
-// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+// The Amazon Web Services CLI and SDKs handle authentication and authorization
+// on your behalf.
//
-// Note the following additional considerations about the request headers x-amz-copy-source-if-match,
-// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and
-// x-amz-copy-source-if-modified-since:
+// # Permissions
//
-// - Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
-// headers are present in the request as follows: x-amz-copy-source-if-match
-// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since
-// condition evaluates to false; Amazon S3 returns 200 OK and copies the
-// data.
+// You must have READ access to the source object and WRITE access to the destination
+// bucket.
//
-// - Consideration 2 - If both of the x-amz-copy-source-if-none-match and
-// x-amz-copy-source-if-modified-since headers are present in the request
-// as follows: x-amz-copy-source-if-none-match condition evaluates to false,
-// and; x-amz-copy-source-if-modified-since condition evaluates to true;
-// Amazon S3 returns 412 Precondition Failed response code.
+// - General purpose bucket permissions - You must have the permissions in
+// a policy based on the bucket types of your source bucket and destination
+// bucket in an UploadPartCopy operation. If the source object is in a general
+// purpose bucket, you must have the s3:GetObject permission to read the
+// source object that is being copied. If the destination bucket is a general
+// purpose bucket, you must have the s3:PutObject permission to write the
+// object copy to the destination bucket. For information about permissions
+// required to use the multipart upload API, see Multipart upload API and
+// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
+// in the Amazon S3 User Guide.
//
-// # Versioning
+// - Directory bucket permissions - You must have permissions in a bucket
+// policy or an IAM identity-based policy based on the source and destination
+// bucket types in an UploadPartCopy operation. If the source object that
+// you want to copy is in a directory bucket, you must have the s3express:CreateSession
+// permission in the Action element of a policy to read the object. By default,
+// the session is in the ReadWrite mode. If you want to restrict the access,
+// you can explicitly set the s3express:SessionMode condition key to ReadOnly
+// on the copy source bucket. If the copy destination is a directory bucket,
+// you must have the s3express:CreateSession permission in the Action element
+// of a policy to write the object to the destination. The s3express:SessionMode
+// condition key cannot be set to ReadOnly on the copy destination. For example
+// policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
//
-// If your bucket has versioning enabled, you could have multiple versions of
-// the same object. By default, x-amz-copy-source identifies the current version
-// of the object to copy. If the current version is a delete marker and you
-// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404
-// error, because the object does not exist. If you specify versionId in the
-// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
-// an HTTP 400 error, because you are not allowed to specify a delete marker
-// as a version for the x-amz-copy-source.
+// Encryption
//
-// You can optionally specify a specific version of the source object to copy
-// by adding the versionId subresource as shown in the following example:
+// - General purpose buckets - For information about using server-side encryption
+// with customer-provided encryption keys with the UploadPartCopy operation,
+// see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
//
-// x-amz-copy-source: /bucket/object?versionId=version id
+// - Directory buckets - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
//
// Special errors
//
-// - Code: NoSuchUpload Cause: The specified multipart upload does not exist.
-// The upload ID might be invalid, or the multipart upload might have been
-// aborted or completed. HTTP Status Code: 404 Not Found
+// - Error Code: NoSuchUpload Description: The specified multipart upload
+// does not exist. The upload ID might be invalid, or the multipart upload
+// might have been aborted or completed. HTTP Status Code: 404 Not Found
+//
+// - Error Code: InvalidRequest Description: The specified copy source is
+// not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request
//
-// - Code: InvalidRequest Cause: The specified copy source is not supported
-// as a byte-range copy source. HTTP Status Code: 400 Bad Request
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
//
// The following operations are related to UploadPartCopy:
//
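A minimal sketch of the x-amz-copy-source and x-amz-copy-source-range pairing described above, expressed through this package's CopySource and CopySourceRange fields; the region, bucket and key names, and upload ID are all placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Copy the first 5 MiB of an existing object into part 1 of a multipart
	// upload; the upload ID stands in for a CreateMultipartUpload result.
	out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("destination-bucket"), // placeholder names
		Key:             aws.String("destination-key"),
		UploadId:        aws.String("example-upload-id"),
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("source-bucket/source-key"),
		CopySourceRange: aws.String("bytes=0-5242879"),
	})
	if err != nil {
		log.Fatalf("UploadPartCopy failed: %v", err)
	}
	log.Printf("copied part ETag: %s", aws.StringValue(out.CopyPartResult.ETag))
}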
@@ -11256,6 +12135,8 @@ func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (
// WriteGetObjectResponse API operation for Amazon Simple Storage Service.
//
+// This operation is not supported by directory buckets.
+//
// Passes transformed objects to a GetObject operation when using Object Lambda
// access points. For information about Object Lambda access points, see Transforming
// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
@@ -11370,27 +12251,41 @@ type AbortMultipartUploadInput struct {
// The bucket name to which the upload was taking place.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Key of the object for which the multipart upload was initiated.
@@ -11399,10 +12294,14 @@ type AbortMultipartUploadInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Upload ID that identifies the multipart upload.
@@ -11523,6 +12422,8 @@ type AbortMultipartUploadOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
@@ -12057,9 +12958,7 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes
return s
}
-// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name
-// is globally unique, and the namespace is shared by all Amazon Web Services
-// accounts.
+// In terms of implementation, a Bucket is a resource.
type Bucket struct {
_ struct{} `type:"structure"`
@@ -12101,6 +13000,51 @@ func (s *Bucket) SetName(v string) *Bucket {
return s
}
+// Specifies the information about the bucket that will be created. For more
+// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+type BucketInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The number of Availability Zones used for redundancy for the bucket.
+ DataRedundancy *string `type:"string" enum:"DataRedundancy"`
+
+ // The type of bucket.
+ Type *string `type:"string" enum:"BucketType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s BucketInfo) GoString() string {
+ return s.String()
+}
+
+// SetDataRedundancy sets the DataRedundancy field's value.
+func (s *BucketInfo) SetDataRedundancy(v string) *BucketInfo {
+ s.DataRedundancy = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *BucketInfo) SetType(v string) *BucketInfo {
+ s.Type = &v
+ return s
+}
+
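A hedged usage sketch for the new BucketInfo type and its generated setters. It assumes that, elsewhere in this diff, CreateBucketConfiguration gains Bucket and Location fields for directory buckets (not shown in this excerpt); the bucket name and Availability Zone ID are placeholders following the bucket_base_name--az-id--x-s3 naming rule.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Build the directory-bucket metadata with the generated setters; the
	// string values correspond to the DataRedundancy and BucketType enums.
	info := (&s3.BucketInfo{}).
		SetDataRedundancy("SingleAvailabilityZone").
		SetType("Directory")

	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3.New(sess)

	// Bucket and Location fields on CreateBucketConfiguration are assumed
	// from the broader directory-bucket changes in this dependency update.
	if _, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket--usw2-az1--x-s3"), // placeholder name
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			Bucket: info,
			Location: &s3.LocationInfo{
				Name: aws.String("usw2-az1"),
				Type: aws.String("AvailabilityZone"),
			},
		},
	}); err != nil {
		log.Fatalf("CreateBucket failed: %v", err)
	}
}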
// Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
// in the Amazon S3 User Guide.
@@ -12572,34 +13516,42 @@ type Checksum struct {
_ struct{} `type:"structure"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `type:"string"`
}
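// Editor's note: a hedged, self-contained sketch of the part-based calculation
// the comments above refer to. For multipart uploads, the reported value is a
// checksum of the concatenated raw part checksums, conventionally shown with a
// "-N" part-count suffix (uses crypto/sha256, encoding/base64, strconv).
//
//	func compositeSHA256(partChecksums []string) (string, error) {
//		h := sha256.New()
//		for _, part := range partChecksums {
//			raw, err := base64.StdEncoding.DecodeString(part)
//			if err != nil {
//				return "", err
//			}
//			h.Write(raw) // hash the binary part digests, in part order
//		}
//		suffix := "-" + strconv.Itoa(len(partChecksums))
//		return base64.StdEncoding.EncodeToString(h.Sum(nil)) + suffix, nil
//	}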
@@ -12759,19 +13711,33 @@ type CompleteMultipartUploadInput struct {
// Name of the bucket to which the multipart upload was initiated.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -12805,9 +13771,9 @@ type CompleteMultipartUploadInput struct {
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Object key for which the multipart upload was initiated.
@@ -12819,16 +13785,23 @@ type CompleteMultipartUploadInput struct {
MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The server-side encryption (SSE) algorithm used to encrypt the object. This
- // parameter is needed only when the object was created using a checksum algorithm.
- // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // parameter is required only when the object was created using a checksum algorithm
+ // or if your bucket policy requires the use of SSE-C. For more information,
+ // see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// The server-side encryption (SSE) customer managed key. This parameter is
@@ -12836,6 +13809,8 @@ type CompleteMultipartUploadInput struct {
// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CompleteMultipartUploadInput's
// String and GoString methods.
@@ -12845,6 +13820,8 @@ type CompleteMultipartUploadInput struct {
// is needed only when the object was created using a checksum algorithm. For
// more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// ID for the initiated multipart upload.
@@ -13021,55 +13998,52 @@ type CompleteMultipartUploadOutput struct {
// The name of the bucket that contains the newly created object. Does not return
// the access point ARN or access point alias if used.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
- //
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // Access points are not supported by directory buckets.
Bucket *string `type:"string"`
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `type:"string"`
@@ -13085,6 +14059,8 @@ type CompleteMultipartUploadOutput struct {
// If the object expiration is configured, this will contain the expiration
// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
// The object key of the newly created object.
@@ -13095,11 +14071,15 @@ type CompleteMultipartUploadOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's
// String and GoString methods.
@@ -13107,10 +14087,15 @@ type CompleteMultipartUploadOutput struct {
// The server-side encryption algorithm used when storing this object in Amazon
// S3 (for example, AES256, aws:kms).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// Version ID of the newly created object, in case the bucket has versioning
// turned on.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
@@ -13263,34 +14248,42 @@ type CompletedPart struct {
_ struct{} `type:"structure"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `type:"string"`
@@ -13299,6 +14292,16 @@ type CompletedPart struct {
// Part number that identifies the part. This is a positive integer between
// 1 and 10,000.
+ //
+ // * General purpose buckets - In CompleteMultipartUpload, when an additional
+ // checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+ // or x-amz-checksum-sha256) is applied to each part, the PartNumber must
+ // start at 1 and the part numbers must be consecutive. Otherwise, Amazon
+ // S3 generates an HTTP 400 Bad Request status code and an InvalidPartOrder
+ // error code.
+ //
+ // * Directory buckets - In CompleteMultipartUpload, the PartNumber must
+ // start at 1 and the part numbers must be consecutive (see the sketch after this struct).
PartNumber *int64 `type:"integer"`
}
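// Editor's note: the sketch referenced above, completing a multipart upload
// under the PartNumber rules (1-based and consecutive). Assumes svc (*s3.S3),
// uploadID, and partOutputs ([]*s3.UploadPartOutput, in upload order) are in
// scope; the bucket and key are placeholders.
//
//	completed := make([]*s3.CompletedPart, 0, len(partOutputs))
//	for i, p := range partOutputs {
//		completed = append(completed, &s3.CompletedPart{
//			PartNumber:     aws.Int64(int64(i + 1)), // starts at 1, consecutive
//			ETag:           p.ETag,
//			ChecksumSHA256: p.ChecksumSHA256, // present when an additional checksum was used
//		})
//	}
//	_, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
//		Bucket:          aws.String("examplebucket"),
//		Key:             aws.String("large-object"),
//		UploadId:        uploadID,
//		MultipartUpload: &s3.CompletedMultipartUpload{Parts: completed},
//	})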
@@ -13458,26 +14461,60 @@ func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg even
type CopyObjectInput struct {
_ struct{} `locationName:"CopyObjectRequest" type:"structure"`
- // The canned ACL to apply to the object.
+ // The canned access control list (ACL) to apply to the object.
+ //
+ // When you copy an object, the ACL metadata is not preserved and is set to
+ // private by default. Only the owner has full access control. To override the
+ // default ACL setting, specify a new ACL when you generate a copy request.
+ // For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+ //
+ // If the destination bucket that you're copying objects to uses the bucket
+ // owner enforced setting for S3 Object Ownership, ACLs are disabled and no
+ // longer affect permissions. Buckets that use this setting only accept PUT
+ // requests that don't specify an ACL or PUT requests that specify bucket owner
+ // full control ACLs, such as the bucket-owner-full-control canned ACL or an
+ // equivalent form of this ACL expressed in the XML format. For more information,
+ // see Controlling ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * If your destination bucket uses the bucket owner enforced setting for
+ // Object Ownership, all objects written to the bucket by any account will
+ // be owned by the bucket owner.
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// The name of the destination bucket.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -13485,44 +14522,73 @@ type CopyObjectInput struct {
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
+ // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the
+ // object.
+ //
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for
- // object encryption with SSE-KMS.
+ // object encryption with SSE-KMS. Specifying this header with a COPY action
+ // doesn’t affect bucket-level settings for S3 Bucket Key.
//
- // Specifying this header with a COPY action doesn’t affect bucket-level settings
- // for S3 Bucket Key.
+ // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
- // Specifies caching behavior along the request/reply chain.
+ // Specifies the caching behavior along the request/reply chain.
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
- // Indicates the algorithm you want Amazon S3 to use to create the checksum
+ // Indicates the algorithm that you want Amazon S3 to use to create the checksum
// for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
+ //
+ // When you copy an object, if the source object has a checksum, that checksum
+ // value will be copied to the new object by default. If the CopyObject request
+ // does not include this x-amz-checksum-algorithm header, the checksum algorithm
+ // will be copied from the source object to the destination object (if it's
+ // present on the source object). You can optionally specify a different checksum
+ // algorithm to use with the x-amz-checksum-algorithm header. Unrecognized or
+ // unsupported values will respond with the HTTP status code 400 Bad Request.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // Specifies presentational information for the object.
+ // Specifies presentational information for the object. Indicates whether an
+ // object should be displayed in a web browser or downloaded as a file. It allows
+ // specifying the desired filename for the downloaded file.
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
// Specifies what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field.
+ //
+ // For directory buckets, only the aws-chunked value is supported in this header
+ // field.
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
// The language the content is in.
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
- // A standard MIME type describing the format of the object data.
+ // A standard MIME type that describes the format of the object data.
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
- // Specifies the source object for the copy operation. You specify the value
- // in one of two formats, depending on whether you want to access the source
- // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+ // Specifies the source object for the copy operation. The source object can
+ // be up to 5 GB. If the source object is an object that was uploaded by using
+ // a multipart upload, the object copy will be a single part object after the
+ // source object is copied to the destination bucket.
+ //
+ // You specify the value of the copy source in one of two formats, depending
+ // on whether you want to access the source object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
//
// * For objects not accessed through an access point, specify the name of
// the source bucket and the key of the source object, separated by a slash
- // (/). For example, to copy the object reports/january.pdf from the bucket
- // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value
- // must be URL-encoded.
+ // (/). For example, to copy the object reports/january.pdf from the general
+ // purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf.
+ // The value must be URL-encoded. To copy the object reports/january.pdf
+ // from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf.
+ // The value must be URL-encoded.
//
// * For objects accessed through access points, specify the Amazon Resource
// Name (ARN) of the object as accessed through the access point, in the
@@ -13531,43 +14597,104 @@ type CopyObjectInput struct {
// my-access-point owned by account 123456789012 in Region us-west-2, use
// the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
// The value must be URL encoded. Amazon S3 supports copy operations using
- // access points only when the source and destination buckets are in the
- // same Amazon Web Services Region. Alternatively, for objects accessed through
- // Amazon S3 on Outposts, specify the ARN of the object as accessed in the
- // format arn:aws:s3-outposts:::outpost//object/.
+ // Access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Access points are not supported by directory
+ // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+ // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/.
// For example, to copy the object reports/january.pdf through outpost my-outpost
// owned by account 123456789012 in Region us-west-2, use the URL encoding
// of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
// The value must be URL-encoded.
//
- // To copy a specific version of an object, append ?versionId= to
- // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ // If your source bucket versioning is enabled, the x-amz-copy-source header
+ // by default identifies the current version of an object to copy. If the current
+ // version is a delete marker, Amazon S3 behaves as if the object was deleted.
+ // To copy a different version, use the versionId query parameter. Specifically,
+ // append ?versionId= to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
// If you don't specify a version ID, Amazon S3 copies the latest version of
// the source object.
//
+ // If you enable versioning on the destination bucket, Amazon S3 generates a
+ // unique version ID for the copied object. This version ID is different from
+ // the version ID of the source object. Amazon S3 returns the version ID of
+ // the copied object in the x-amz-version-id response header in the response.
+ //
+ // If you do not enable versioning or suspend it on the destination bucket,
+ // the version ID that Amazon S3 generates in the x-amz-version-id response
+ // header is always null.
+ //
+ // Directory buckets - S3 Versioning isn't enabled and supported for directory
+ // buckets.
+ //
// CopySource is a required field
CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
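// Editor's note: a hedged example of the copy-source format described above,
// pinning a specific source version. The bucket, key, and version ID reuse the
// documentation's placeholders; the versionId query suffix stays unencoded.
//
//	src := url.QueryEscape("awsexamplebucket/reports/january.pdf") +
//		"?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:     aws.String("awsexamplebucket"),
//		Key:        aws.String("reports/january-v1.pdf"),
//		CopySource: aws.String(src),
//	})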
// Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request and evaluate as follows, Amazon S3 returns
+ // 200 OK and copies the data:
+ //
+ // * x-amz-copy-source-if-match condition evaluates to true
+ //
+ // * x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
// Copies the object if it has been modified since the specified time.
+ //
+ // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request and evaluate as follows, Amazon S3 returns
+ // the 412 Precondition Failed response code:
+ //
+ // * x-amz-copy-source-if-none-match condition evaluates to false
+ //
+ // * x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
// Copies the object if its entity tag (ETag) is different than the specified
// ETag.
+ //
+ // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request and evaluate as follows, Amazon S3 returns
+ // the 412 Precondition Failed response code:
+ //
+ // * x-amz-copy-source-if-none-match condition evaluates to false
+ //
+ // * x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
// Copies the object if it hasn't been modified since the specified time.
+ //
+ // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request and evaluate as follows, Amazon S3 returns
+ // 200 OK and copies the data:
+ //
+ // * x-amz-copy-source-if-match condition evaluates to true
+ //
+ // * x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
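// Editor's note: a small sketch combining the conditional headers documented
// above. Per those rules, the copy returns 200 OK only while the source ETag
// still matches and the object is unmodified since the given time; the ETag
// literal and lastKnownModTime are placeholders.
//
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:                      aws.String("destination-bucket"),
//		Key:                         aws.String("guarded-copy"),
//		CopySource:                  aws.String(url.QueryEscape("source-bucket/original")),
//		CopySourceIfMatch:           aws.String(`"9b2cf535f27731c974343645a3985328"`),
//		CopySourceIfUnmodifiedSince: aws.Time(lastKnownModTime),
//	})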
// Specifies the algorithm to use when decrypting the source object (for example,
// AES256).
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you
+ // must provide the necessary encryption information in your request so that
+ // Amazon S3 can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
- // the source object. The encryption key provided in this header must be one
- // that was used when the source object was created.
+ // the source object. The encryption key provided in this header must be the
+ // same one that was used when the source object was created.
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you
+ // must provide the necessary encryption information in your request so that
+ // Amazon S3 can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
//
// CopySourceSSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CopyObjectInput's
@@ -13577,16 +14704,23 @@ type CopyObjectInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // If the source object for the copy is stored in Amazon S3 using SSE-C, you
+ // must provide the necessary encryption information in your request so that
+ // Amazon S3 can decrypt the object for copying.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
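// Editor's note: a hedged sketch of supplying SSE-C decryption material for the
// source object, as the three copy-source SSE-C headers above require. sourceKey
// is a placeholder for the original 256-bit key; to the best of this editor's
// reading, the vendored SDK's SSE customization base64-encodes a raw key and
// fills in the -MD5 header when it is left unset.
//
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:                         aws.String("destination-bucket"),
//		Key:                            aws.String("plain-copy"),
//		CopySource:                     aws.String(url.QueryEscape("source-bucket/ssec-object")),
//		CopySourceSSECustomerAlgorithm: aws.String("AES256"),
//		CopySourceSSECustomerKey:       aws.String(string(sourceKey)),
//	})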
- // The account ID of the expected destination bucket owner. If the destination
- // bucket is owned by a different account, the request fails with the HTTP status
- // code 403 Forbidden (access denied).
+ // The account ID of the expected destination bucket owner. If the account ID
+ // that you provide does not match the actual owner of the destination bucket,
+ // the request fails with the HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
- // The account ID of the expected source bucket owner. If the source bucket
- // is owned by a different account, the request fails with the HTTP status code
- // 403 Forbidden (access denied).
+ // The account ID of the expected source bucket owner. If the account ID that
+ // you provide does not match the actual owner of the source bucket, the request
+ // fails with the HTTP status code 403 Forbidden (access denied).
ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable.
@@ -13594,22 +14728,30 @@ type CopyObjectInput struct {
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// The key of the destination object.
@@ -13621,35 +14763,69 @@ type CopyObjectInput struct {
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Specifies whether the metadata is copied from the source object or replaced
- // with metadata provided in the request.
+ // with metadata that's provided in the request. When copying an object, you
+ // can preserve all metadata (the default) or specify new metadata. If this
+ // header isn’t specified, COPY is the default behavior.
+ //
+ // General purpose bucket - For general purpose buckets, when you grant permissions,
+ // you can use the s3:x-amz-metadata-directive condition key to enforce certain
+ // metadata behavior when objects are uploaded. For more information, see Amazon
+ // S3 condition key examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
+ // in the Amazon S3 User Guide.
+ //
+ // x-amz-website-redirect-location is unique to each object and is not copied
+ // when using the x-amz-metadata-directive header. To copy the value, you must
+ // specify x-amz-website-redirect-location in the request header.
MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
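// Editor's note: a brief sketch of the REPLACE behavior described above; with
// the default COPY directive, the Metadata map would be ignored and the source
// metadata preserved. The metadata key and value are placeholders.
//
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:            aws.String("destination-bucket"),
//		Key:               aws.String("copy-with-new-metadata"),
//		CopySource:        aws.String(url.QueryEscape("source-bucket/original")),
//		MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
//		Metadata: map[string]*string{
//			"origin": aws.String("stash-backup"),
//		},
//	})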
- // Specifies whether you want to apply a legal hold to the copied object.
+ // Specifies whether you want to apply a legal hold to the object copy.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // The Object Lock mode that you want to apply to the copied object.
+ // The Object Lock mode that you want to apply to the object copy.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
- // The date and time when you want the copied object's Object Lock to expire.
+ // The date and time when you want the Object Lock of the object copy to expire.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // When you perform a CopyObject operation, if you want to use a different type
+ // of encryption setting for the target object, you can specify appropriate
+ // encryption-related headers to encrypt the target object with an Amazon S3
+ // managed key, a KMS key, or a customer-provided key. If the encryption setting
+ // in your request is different from the default encryption configuration of
+ // the destination bucket, the encryption setting in your request takes precedence.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
- // data. This value is used to store the object and then it is discarded; Amazon
+ // data. This value is used to store the object and then it is discarded. Amazon
// S3 does not store the encryption key. The key must be appropriate for use
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
//
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CopyObjectInput's
// String and GoString methods.
@@ -13658,55 +14834,201 @@ type CopyObjectInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
- // JSON with the encryption context key-value pairs.
+ // JSON with the encryption context key-value pairs. This value must be explicitly
+ // added to specify encryption context for CopyObject requests.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
//
// SSEKMSEncryptionContext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CopyObjectInput's
// String and GoString methods.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // Specifies the KMS key ID to use for object encryption. All GET and PUT requests
- // for an object protected by KMS will fail if they're not made via SSL or using
- // SigV4. For information about configuring any of the officially supported
- // Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the
- // Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+ // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption.
+ // All GET and PUT requests for an object protected by KMS will fail if they're
+ // not made via SSL or using SigV4. For information about configuring any of
+ // the officially supported Amazon Web Services SDKs and Amazon Web Services
+ // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// in the Amazon S3 User Guide.
//
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CopyObjectInput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
// The server-side encryption algorithm used when storing this object in Amazon
- // S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ // S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported
+ // values won’t write a destination object and will receive a 400 Bad Request
+ // response.
+ //
+ // Amazon S3 automatically encrypts all new objects that are copied to an S3
+ // bucket. When copying an object, if you don't specify encryption information
+ // in your copy request, the encryption setting of the target object is set
+ // to the default encryption configuration of the destination bucket. By default,
+ // all buckets have a base level of encryption configuration that uses server-side
+ // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket
+ // has a default encryption configuration that uses server-side encryption with
+ // Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption
+ // with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
+ // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding
+ // KMS key, or a customer-provided key to encrypt the target object copy.
+ //
+ // When you perform a CopyObject operation, if you want to use a different type
+ // of encryption setting for the target object, you can specify appropriate
+ // encryption-related headers to encrypt the target object with an Amazon S3
+ // managed key, a KMS key, or a customer-provided key. If the encryption setting
+ // in your request is different from the default encryption configuration of
+ // the destination bucket, the encryption setting in your request takes precedence.
+ //
+ // With server-side encryption, Amazon S3 encrypts your data as it writes your
+ // data to disks in its data centers and decrypts the data when you access it.
+ // For more information about server-side encryption, see Using Server-Side
+ // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+ // in the Amazon S3 User Guide.
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
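// Editor's note: a hedged sketch of overriding the destination bucket's default
// encryption on copy, per the precedence rules above; the KMS key ARN is a
// placeholder.
//
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:               aws.String("destination-bucket"),
//		Key:                  aws.String("kms-encrypted-copy"),
//		CopySource:           aws.String(url.QueryEscape("source-bucket/original")),
//		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
//		SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/placeholder"),
//		BucketKeyEnabled:     aws.Bool(true), // opt the copy into an S3 Bucket Key
//	})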
- // By default, Amazon S3 uses the STANDARD Storage Class to store newly created
- // objects. The STANDARD storage class provides high durability and high availability.
- // Depending on performance needs, you can specify a different Storage Class.
- // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
- // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // If the x-amz-storage-class header is not used, the copied object will be
+ // stored in the STANDARD Storage Class by default. The STANDARD storage class
+ // provides high durability and high availability. Depending on performance
+ // needs, you can specify a different Storage Class.
+ //
+ // * Directory buckets - For directory buckets, only the S3 Express One Zone
+ // storage class is supported to store newly created objects. Unsupported
+ // storage class values won't write a destination object and will respond
+ // with the HTTP status code 400 Bad Request.
+ //
+ // * Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage
+ // Class.
+ //
+ // You can use the CopyObject action to change the storage class of an object
+ // that is already stored in Amazon S3 by using the x-amz-storage-class header.
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Before using an object as a source object for the copy operation, you must
+ // restore a copy of it if it meets any of the following conditions:
+ //
+ // * The storage class of the source object is GLACIER or DEEP_ARCHIVE.
+ //
+ // * The storage class of the source object is INTELLIGENT_TIERING and its
+ // S3 Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
+ // is Archive Access or Deep Archive Access.
+ //
+ // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+ // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
// in the Amazon S3 User Guide.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
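// Editor's note: the in-place storage-class change mentioned above, sketched by
// copying an object onto itself with a new x-amz-storage-class; the bucket and
// key are placeholders.
//
//	_, err := svc.CopyObject(&s3.CopyObjectInput{
//		Bucket:       aws.String("examplebucket"),
//		Key:          aws.String("reports/january.pdf"),
//		CopySource:   aws.String(url.QueryEscape("examplebucket/reports/january.pdf")),
//		StorageClass: aws.String(s3.StorageClassStandardIa),
//	})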
- // The tag-set for the object destination object this value must be used in
- // conjunction with the TaggingDirective. The tag-set must be encoded as URL
- // Query parameters.
+ // The tag-set for the object copy in the destination bucket. This value must
+ // be used in conjunction with the x-amz-tagging-directive if you choose REPLACE
+ // for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive,
+ // you don't need to set the x-amz-tagging header, because the tag-set will
+ // be copied from the source object directly. The tag-set must be encoded as
+ // URL Query parameters.
+ //
+ // The default value is the empty value.
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only
+ // the empty tag-set is supported. Any requests that attempt to write non-empty
+ // tags into directory buckets will receive a 501 Not Implemented status code.
+ // When the destination bucket is a directory bucket, you will receive a 501
+ // Not Implemented response in any of the following situations:
+ //
+ // * When you attempt to COPY the tag-set from an S3 source object that has
+ // non-empty tags.
+ //
+ // * When you attempt to REPLACE the tag-set of a source object and set a
+ // non-empty value to x-amz-tagging.
+ //
+ // * When you don't set the x-amz-tagging-directive header and the source
+ // object has non-empty tags. This is because the default value of x-amz-tagging-directive
+ // is COPY.
+ //
+ // Because only the empty tag-set is supported for directory buckets in a CopyObject
+ // operation, the following situations are allowed:
+ //
+ // * When you attempt to COPY the tag-set from a directory bucket source
+ // object that has no tags to a general purpose bucket. It copies an empty
+ // tag-set to the destination object.
+ //
+ // * When you attempt to REPLACE the tag-set of a directory bucket source
+ // object and set the x-amz-tagging value of the directory bucket destination
+ // object to empty.
+ //
+ // * When you attempt to REPLACE the tag-set of a general purpose bucket
+ // source object that has non-empty tags and set the x-amz-tagging value
+ // of the directory bucket destination object to empty.
+ //
+ // * When you attempt to REPLACE the tag-set of a directory bucket source
+ // object and don't set the x-amz-tagging value of the directory bucket destination
+ // object. This is because the default value of x-amz-tagging is the empty
+ // value.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
- // Specifies whether the object tag-set are copied from the source object or
- // replaced with tag-set provided in the request.
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set that's provided in the request.
+ //
+ // The default value is COPY.
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only
+ // the empty tag-set is supported. Any requests that attempt to write non-empty
+ // tags into directory buckets will receive a 501 Not Implemented status code.
+ // When the destination bucket is a directory bucket, you will receive a 501
+ // Not Implemented response in any of the following situations:
+ //
+ // * When you attempt to COPY the tag-set from an S3 source object that has
+ // non-empty tags.
+ //
+ // * When you attempt to REPLACE the tag-set of a source object and set a
+ // non-empty value to x-amz-tagging.
+ //
+ // * When you don't set the x-amz-tagging-directive header and the source
+ // object has non-empty tags. This is because the default value of x-amz-tagging-directive
+ // is COPY.
+ //
+ // Because only the empty tag-set is supported for directory buckets in a CopyObject
+ // operation, the following situations are allowed:
+ //
+ // * When you attempt to COPY the tag-set from a directory bucket source
+ // object that has no tags to a general purpose bucket. It copies an empty
+ // tag-set to the destination object.
+ //
+ // * When you attempt to REPLACE the tag-set of a directory bucket source
+ // object and set the x-amz-tagging value of the directory bucket destination
+ // object to empty.
+ //
+ // * When you attempt to REPLACE the tag-set of a general purpose bucket
+ // source object that has non-empty tags and set the x-amz-tagging value
+ // of the directory bucket destination object to empty.
+ //
+ // * When you attempt to REPLACE the tag-set of a directory bucket source
+ // object and don't set the x-amz-tagging value of the directory bucket destination
+ // object. This is because the default value of x-amz-tagging is the empty
+ // value.
TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
- // If the bucket is configured as a website, redirects requests for this object
- // to another object in the same bucket or to an external URL. Amazon S3 stores
- // the value of this header in the object metadata. This value is unique to
- // each object and is not copied when using the x-amz-metadata-directive header.
- // Instead, you may opt to provide this header in combination with the directive.
+ // If the destination bucket is configured as a website, redirects requests
+ // for this object copy to another object in the same bucket or to an external
+ // URL. Amazon S3 stores the value of this header in the object metadata. This
+ // value is unique to each object and is not copied when using the x-amz-metadata-directive
+ // header. Instead, you may opt to provide this header in combination with the
+ // x-amz-metadata-directive header.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
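The tag-set rules above reduce to two common calls. A minimal sketch with this v1 SDK, assuming an initialized *s3.S3 client named svc, hypothetical bucket and key names, and imports of github.com/aws/aws-sdk-go/aws and .../service/s3:

func copyWithReplacedTags(svc *s3.S3) error {
	// REPLACE the tag-set on the copy; Tagging must be URL-query encoded.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:           aws.String("dest-bucket"), // hypothetical destination
		Key:              aws.String("report-copy.csv"),
		CopySource:       aws.String("src-bucket/report.csv"),
		TaggingDirective: aws.String(s3.TaggingDirectiveReplace),
		Tagging:          aws.String("env=prod&team=storage"),
	})
	// For directory bucket destinations, only the empty tag-set works:
	// either REPLACE with Tagging unset, or COPY from an untagged source.
	return err
}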
@@ -14052,53 +15374,75 @@ type CopyObjectOutput struct {
// Indicates whether the copied object uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Container for all response elements.
CopyObjectResult *CopyObjectResult `type:"structure"`
- // Version of the copied object in the destination bucket.
+ // Version ID of the source object that was copied.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
// If the object expiration is configured, the response includes this header.
+ //
+ // This functionality is not supported for directory buckets.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the Amazon Web Services KMS Encryption Context to use
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use
// for object encryption. The value of this header is a base64-encoded UTF-8
// string holding JSON with the encryption context key-value pairs.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSEncryptionContext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CopyObjectOutput's
// String and GoString methods.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CopyObjectOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// Version ID of the newly created copy.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
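Because most of these response headers are omitted for directory buckets, the corresponding output fields come back nil. A defensive reader might look like the following sketch (logCopyResult is a hypothetical helper; fmt is from the standard library):

func logCopyResult(out *s3.CopyObjectOutput) {
	if out.ServerSideEncryption != nil {
		fmt.Println("encryption:", *out.ServerSideEncryption)
	}
	if out.VersionId != nil { // nil for directory buckets
		fmt.Println("new version:", *out.VersionId)
	}
	if out.CopySourceVersionId != nil { // nil when the source is in a directory bucket
		fmt.Println("source version:", *out.CopySourceVersionId)
	}
}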
@@ -14191,34 +15535,26 @@ type CopyObjectResult struct {
_ struct{} `type:"structure"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. For more information, see
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. For more information, see
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `type:"string"`
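Each checksum field holds the base64 encoding of the raw digest bytes, so a client can recompute the value locally and compare. A sketch for ChecksumCRC32, assuming S3's CRC32 uses the IEEE polynomial with big-endian byte order (standard-library imports only: hash/crc32, encoding/binary, encoding/base64):

func crc32Base64(data []byte) string {
	sum := crc32.ChecksumIEEE(data) // 32-bit CRC32, IEEE polynomial
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], sum) // raw digest bytes, big-endian
	return base64.StdEncoding.EncodeToString(buf[:])
}

A caller would compare crc32Base64(body) against aws.StringValue(result.ChecksumCRC32) when the field is present.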
@@ -14289,34 +15625,42 @@ type CopyPartResult struct {
_ struct{} `type:"structure"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use the API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `type:"string"`
@@ -14385,8 +15729,29 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
type CreateBucketConfiguration struct {
_ struct{} `type:"structure"`
- // Specifies the Region where the bucket will be created. If you don't specify
- // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1).
+ // Specifies the information about the bucket that will be created.
+ //
+ // This functionality is only supported by directory buckets.
+ Bucket *BucketInfo `type:"structure"`
+
+ // Specifies the location where the bucket will be created.
+ //
+ // For directory buckets, the location type is Availability Zone.
+ //
+ // This functionality is only supported by directory buckets.
+ Location *LocationInfo `type:"structure"`
+
+ // Specifies the Region where the bucket will be created. You might choose a
+ // Region to optimize latency, minimize costs, or address regulatory requirements.
+ // For example, if you reside in Europe, you will probably find it advantageous
+ // to create buckets in the Europe (Ireland) Region. For more information, see
+ // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
+ // in the Amazon S3 User Guide.
+ //
+ // If you don't specify a Region, the bucket is created in the US East (N. Virginia)
+ // Region (us-east-1) by default.
+ //
+ // This functionality is not supported for directory buckets.
LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
}
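For a directory bucket, the new Bucket and Location members replace LocationConstraint. A sketch, assuming the enum constants generated alongside these types (LocationTypeAvailabilityZone, BucketTypeDirectory, DataRedundancySingleAvailabilityZone) and a hypothetical Availability Zone ID:

func directoryBucketConfig() *s3.CreateBucketConfiguration {
	return &s3.CreateBucketConfiguration{
		Location: &s3.LocationInfo{
			Name: aws.String("usw2-az1"), // an Availability Zone ID, not a Region
			Type: aws.String(s3.LocationTypeAvailabilityZone),
		},
		Bucket: &s3.BucketInfo{
			Type:           aws.String(s3.BucketTypeDirectory),
			DataRedundancy: aws.String(s3.DataRedundancySingleAvailabilityZone),
		},
	}
}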
@@ -14408,6 +15773,22 @@ func (s CreateBucketConfiguration) GoString() string {
return s.String()
}
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketConfiguration) SetBucket(v *BucketInfo) *CreateBucketConfiguration {
+ s.Bucket = v
+ return s
+}
+
+func (s *CreateBucketConfiguration) getBucket() (v *BucketInfo) {
+ return s.Bucket
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketConfiguration) SetLocation(v *LocationInfo) *CreateBucketConfiguration {
+ s.Location = v
+ return s
+}
+
// SetLocationConstraint sets the LocationConstraint field's value.
func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
s.LocationConstraint = &v
@@ -14418,10 +15799,25 @@ type CreateBucketInput struct {
_ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"`
// The canned ACL to apply to the bucket.
+ //
+ // This functionality is not supported for directory buckets.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
// The name of the bucket to create.
//
+ // General purpose buckets - For information about bucket naming restrictions,
+ // see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+	// you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name.
+	// Virtual-hosted-style requests aren't supported. Directory bucket names
+ // must be unique in the chosen Availability Zone. Bucket names must also follow
+ // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+ // For information about bucket naming restrictions, see Directory bucket naming
+ // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+	// in the Amazon S3 User Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -14430,24 +15826,36 @@ type CreateBucketInput struct {
// Allows grantee the read, write, read ACP, and write ACP permissions on the
// bucket.
+ //
+ // This functionality is not supported for directory buckets.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to list the objects in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the bucket ACL.
+ //
+ // This functionality is not supported for directory buckets.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to create new objects in the bucket.
//
// For the bucket and object owners of existing objects, also allows deletions
// and overwrites of those objects.
+ //
+ // This functionality is not supported for directory buckets.
GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
// Allows grantee to write the ACL for the applicable bucket.
+ //
+ // This functionality is not supported for directory buckets.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"`
// The container element for object ownership for a bucket's ownership controls.
@@ -14462,8 +15870,19 @@ type CreateBucketInput struct {
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that
- // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control
- // canned ACL or an equivalent form of this ACL expressed in the XML format.
+ // don't specify an ACL or specify bucket owner full control ACLs (such as the
+ // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format
+ // that grants the same permissions).
+ //
+ // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled.
+ // We recommend keeping ACLs disabled, except in uncommon use cases where you
+ // must control access for each object individually. For more information about
+ // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs
+ // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Directory buckets
+ // use the bucket owner enforced setting for S3 Object Ownership.
ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"`
}
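Putting the input together for a general purpose bucket: LocationConstraint picks the Region, and ObjectOwnership makes the default ACL-disabled posture explicit. A minimal sketch, again assuming an initialized *s3.S3 client named svc and a hypothetical bucket name:

func createRegionalBucket(svc *s3.S3) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"), // omit to default to us-east-1
		},
		ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
	})
	return err
}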
@@ -14602,26 +16021,54 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
type CreateMultipartUploadInput struct {
_ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"`
- // The canned ACL to apply to the object.
+ // The canned ACL to apply to the object. Amazon S3 supports a set of predefined
+ // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
+ // and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
+ // in the Amazon S3 User Guide.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can grant access permissions to individual
+ // Amazon Web Services accounts or to predefined groups defined by Amazon S3.
+ // These permissions are then added to the access control list (ACL) on the
+ // new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+ // One way to grant the permissions using the request headers is to specify
+ // a canned ACL with the x-amz-acl request header.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
- // The name of the bucket to which to initiate the upload
+ // The name of the bucket where the multipart upload is initiated and where
+ // the object is uploaded.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+	// in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az_id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -14634,12 +16081,14 @@ type CreateMultipartUploadInput struct {
//
// Specifying this header with an object action doesn’t affect bucket-level
// settings for S3 Bucket Key.
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Specifies caching behavior along the request/reply chain.
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
- // Indicates the algorithm you want Amazon S3 to use to create the checksum
+ // Indicates the algorithm that you want Amazon S3 to use to create the checksum
// for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
@@ -14650,40 +16099,175 @@ type CreateMultipartUploadInput struct {
// Specifies what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field.
+ //
+ // For directory buckets, only the aws-chunked value is supported in this header
+ // field.
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
- // The language the content is in.
+ // The language that the content is in.
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
// A standard MIME type describing the format of the object data.
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable.
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
- // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ // Specify access permissions explicitly to give the grantee READ, READ_ACP,
+ // and WRITE_ACP permissions on the object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ //
+	// For example, the following x-amz-grant-full-control header grants the Amazon
+	// Web Services accounts identified by account IDs full control over the object:
+	//
+	// x-amz-grant-full-control: id="11112222333", id="444455556666"
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
- // Allows grantee to read the object data and its metadata.
+	// Specify access permissions explicitly to allow the grantee to read the object
+ // data and its metadata.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
+ //
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ //
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data
+ // and its metadata:
+ //
+ // x-amz-grant-read: id="11112222333", id="444455556666"
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
- // Allows grantee to read the object ACL.
+	// Specify access permissions explicitly to allow the grantee to read the object
+	// ACL.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
+ //
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ //
+	// For example, the following x-amz-grant-read-acp header grants the Amazon
+	// Web Services accounts identified by account IDs permissions to read the
+	// object ACL:
+	//
+	// x-amz-grant-read-acp: id="11112222333", id="444455556666"
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
- // Allows grantee to write the ACL for the applicable object.
+	// Specify access permissions explicitly to allow the grantee to write the ACL
+	// for the applicable object.
+ //
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header
+ // maps to specific permissions that Amazon S3 supports in an ACL. For more
+ // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // You specify each grantee as a type=value pair, where the type is one of the
+ // following:
+ //
+ // * id – if the value specified is the canonical user ID of an Amazon
+ // Web Services account
+ //
+ // * uri – if you are granting permissions to a predefined group
+ //
+ // * emailAddress – if the value specified is the email address of an Amazon
+ // Web Services account Using email addresses to specify a grantee is only
+ // supported in the following Amazon Web Services Regions: US East (N. Virginia)
+ // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia
+ // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São
+ // Paulo) For a list of all the Amazon S3 supported Regions and endpoints,
+ // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
//
- // This action is not supported by Amazon S3 on Outposts.
+	// For example, the following x-amz-grant-write-acp header grants the Amazon
+	// Web Services accounts identified by account IDs permissions to write the
+	// ACL for the applicable object:
+	//
+	// x-amz-grant-write-acp: id="11112222333", id="444455556666"
+ //
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the multipart upload is to be initiated.
@@ -14695,23 +16279,34 @@ type CreateMultipartUploadInput struct {
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Specifies whether you want to apply a legal hold to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// Specifies the Object Lock mode that you want to apply to the uploaded object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// Specifies the date and time when you want the Object Lock to expire.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+	// Bucket owners need not specify this parameter in their requests. If the
+	// bucket has Requester Pays enabled, the requester will pay for the corresponding
+	// charges for the upload. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -14720,56 +16315,70 @@ type CreateMultipartUploadInput struct {
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateMultipartUploadInput's
// String and GoString methods.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
- // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error.
+ // Specifies the 128-bit MD5 digest of the customer-provided encryption key
+ // according to RFC 1321. Amazon S3 uses this header for a message integrity
+ // check to ensure that the encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSEncryptionContext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateMultipartUploadInput's
// String and GoString methods.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // Specifies the ID of the symmetric encryption customer managed key to use
- // for object encryption. All GET and PUT requests for an object protected by
- // KMS will fail if they're not made via SSL or using SigV4. For information
- // about configuring any of the officially supported Amazon Web Services SDKs
- // and Amazon Web Services CLI, see Specifying the Signature Version in Request
- // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
- // in the Amazon S3 User Guide.
+ // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption
+ // customer managed key to use for object encryption.
+ //
+ // This functionality is not supported for directory buckets.
//
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateMultipartUploadInput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
- // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
- // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
+ //
+ // * For directory buckets, only the S3 Express One Zone storage class is
+ // supported to store newly created objects.
+ //
+ // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
+ //
+ // This functionality is not supported for directory buckets.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
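A sketch tying several of these inputs together for a general purpose bucket (a directory bucket would reject most of them, per the notes above); svc is an assumed *s3.S3 client, and the bucket, key, and KMS alias are hypothetical:

func startEncryptedUpload(svc *s3.S3) (string, error) {
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("doc-example-bucket"),
		Key:                  aws.String("backups/large-object.bin"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("alias/backup-key"), // Key ID, Key ARN, or alias
		StorageClass:         aws.String(s3.StorageClassStandardIa),
		ChecksumAlgorithm:    aws.String(s3.ChecksumAlgorithmSha256),
	})
	if err != nil {
		return "", err
	}
	// The returned upload ID is passed to UploadPart and CompleteMultipartUpload.
	return aws.StringValue(out.UploadId), nil
}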
@@ -15042,38 +16651,32 @@ type CreateMultipartUploadOutput struct {
// name in the request, the response includes this header. The header indicates
// when the initiated multipart upload becomes eligible for an abort operation.
// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
- // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+ // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // in the Amazon S3 User Guide.
//
// The response also includes the x-amz-abort-rule-id header that provides the
- // ID of the lifecycle configuration rule that defines this action.
+ // ID of the lifecycle configuration rule that defines the abort action.
+ //
+ // This functionality is not supported for directory buckets.
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
// This header is returned along with the x-amz-abort-date header. It identifies
// the applicable lifecycle configuration rule that defines the action to abort
// incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
// The name of the bucket to which the multipart upload was initiated. Does
// not return the access point ARN or access point alias if used.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
- //
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
+ // Access points are not supported by directory buckets.
Bucket *string `locationName:"Bucket" type:"string"`
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// The algorithm that was used to create a checksum of the object.
@@ -15084,37 +16687,50 @@ type CreateMultipartUploadOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the Amazon Web Services KMS Encryption Context to use
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use
// for object encryption. The value of this header is a base64-encoded UTF-8
// string holding JSON with the encryption context key-value pairs.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSEncryptionContext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateMultipartUploadOutput's
// String and GoString methods.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by CreateMultipartUploadOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// ID for the initiated multipart upload.
@@ -15224,6 +16840,136 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo
return s
}
+type CreateSessionInput struct {
+ _ struct{} `locationName:"CreateSessionRequest" type:"structure"`
+
+ // The name of the bucket that you create a session for.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies the mode of the session that will be created, either ReadWrite
+ // or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session
+ // is capable of executing all the Zonal endpoint APIs on a directory bucket.
+ // A ReadOnly session is constrained to execute the following Zonal endpoint
+ // APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts,
+ // and ListMultipartUploads.
+ SessionMode *string `location:"header" locationName:"x-amz-create-session-mode" type:"string" enum:"SessionMode"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateSessionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateSessionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateSessionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateSessionInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateSessionInput) SetBucket(v string) *CreateSessionInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *CreateSessionInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetSessionMode sets the SessionMode field's value.
+func (s *CreateSessionInput) SetSessionMode(v string) *CreateSessionInput {
+ s.SessionMode = &v
+ return s
+}
+
+func (s *CreateSessionInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *CreateSessionInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type CreateSessionOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The established temporary security credentials for the created session.
+ //
+ // Credentials is a required field
+ Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateSessionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateSessionOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *CreateSessionOutput) SetCredentials(v *SessionCredentials) *CreateSessionOutput {
+ s.Credentials = v
+ return s
+}
+
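A sketch of the session flow these new types describe, assuming an svc client pointed at a directory bucket's Zonal endpoint and the SessionModeReadOnly constant generated with this API:

func readOnlySession(svc *s3.S3) (*s3.SessionCredentials, error) {
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		Bucket:      aws.String("doc-example-bucket--usw2-az1--x-s3"),
		SessionMode: aws.String(s3.SessionModeReadOnly), // read-only Zonal endpoint APIs
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil // temporary key pair plus session token
}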
// The container element for specifying the default Object Lock retention settings
// for new objects placed in the specified bucket.
//
@@ -15289,6 +17035,11 @@ type Delete struct {
// The object to delete.
//
+	// Directory buckets - For directory buckets, an object key that's composed
+	// entirely of whitespace characters is not supported by the DeleteObjects
+	// API operation.
+ // The request will receive a 400 Bad Request error and none of the objects
+ // in the request will be deleted.
+ //
// Objects is a required field
Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
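The Objects list is typically built and sent through DeleteObjects. A sketch with hypothetical keys (note the directory-bucket restriction above on whitespace-only keys):

func deleteLogBatch(svc *s3.S3) error {
	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("doc-example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/2024-01-01.txt")},
				{Key: aws.String("logs/2024-01-02.txt")},
			},
			Quiet: aws.Bool(true), // suppress per-key success entries in the response
		},
	})
	return err
}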
@@ -15358,9 +17109,9 @@ type DeleteBucketAnalyticsConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID that identifies the analytics configuration.
@@ -15488,9 +17239,9 @@ type DeleteBucketCorsInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -15605,9 +17356,9 @@ type DeleteBucketEncryptionInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -15718,12 +17469,25 @@ type DeleteBucketInput struct {
// Specifies the bucket being deleted.
//
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+ // . Virtual-hosted-style requests aren't supported. Directory bucket names
+ // must be unique in the chosen Availability Zone. Bucket names must also follow
+ // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+ // For information about bucket naming restrictions, see Directory bucket naming
+ // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation.
+ // If you specify this header, the request fails with the HTTP status code 501
+ // Not Implemented.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
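A sketch of the owner check described above: for general-purpose buckets a mismatched account ID yields 403 Forbidden, while for directory buckets the header itself yields 501 Not Implemented, so it should simply be omitted there (bucket name and account ID are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// General-purpose bucket: fails with 403 Forbidden if 111122223333
	// does not actually own the bucket. For a directory bucket, leave
	// ExpectedBucketOwner unset to avoid 501 Not Implemented.
	if _, err := svc.DeleteBucket(&s3.DeleteBucketInput{
		Bucket:              aws.String("amzn-s3-demo-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	}); err != nil {
		log.Fatal(err)
	}
}
```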
@@ -15935,9 +17699,9 @@ type DeleteBucketInventoryConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID used to identify the inventory configuration.
@@ -16065,9 +17829,9 @@ type DeleteBucketLifecycleInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -16181,9 +17945,9 @@ type DeleteBucketMetricsConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID used to identify the metrics configuration. The ID has a 64 character
@@ -16334,9 +18098,9 @@ type DeleteBucketOwnershipControlsInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -16447,12 +18211,25 @@ type DeleteBucketPolicyInput struct {
// The bucket name.
//
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+ // . Virtual-hosted-style requests aren't supported. Directory bucket names
+ // must be unique in the chosen Availability Zone. Bucket names must also follow
+ // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+ // For information about bucket naming restrictions, see Directory bucket naming
+ // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation.
+ // If you specify this header, the request fails with the HTTP status code 501
+ // Not Implemented.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -16566,9 +18343,9 @@ type DeleteBucketReplicationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -16682,9 +18459,9 @@ type DeleteBucketTaggingInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -16798,9 +18575,9 @@ type DeleteBucketWebsiteInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -16917,7 +18694,7 @@ type DeleteMarkerEntry struct {
// The object key.
Key *string `min:"1" type:"string"`
- // Date and time the object was last modified.
+ // Date and time when the object was last modified.
LastModified *time.Time `type:"timestamp"`
// The account that created the delete marker.
@@ -17026,19 +18803,33 @@ type DeleteObjectInput struct {
// The bucket name of the bucket containing the object.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -17047,11 +18838,13 @@ type DeleteObjectInput struct {
// Indicates whether S3 Object Lock should bypass Governance-mode restrictions
// to process this operation. To use this header, you must have the s3:BypassGovernanceRetention
// permission.
+ //
+ // This functionality is not supported for directory buckets.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Key name of the object to delete.
@@ -17063,16 +18856,25 @@ type DeleteObjectInput struct {
// and the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
// delete enabled.
+ //
+ // This functionality is not supported for directory buckets.
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // VersionId used to reference a specific version of the object.
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
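A sketch tying together the Bucket, VersionId, and output semantics above: a versioned delete addressed through an access point ARN (the ARN, key, and version ID are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// The access point ARN stands in for the bucket name; the SDK
	// rewrites the endpoint from the ARN (see hasEndpointARN above).
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
		Key:       aws.String("reports/2023/q1.csv"),
		VersionId: aws.String("3HL4kqtJvjVBH40Nr8X8gdRQBpUMLUo"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// DeleteMarker reports whether the removed version was a delete marker.
	fmt.Println("was delete marker:", aws.BoolValue(out.DeleteMarker))
}
```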
@@ -17195,16 +18997,24 @@ func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) {
type DeleteObjectOutput struct {
_ struct{} `type:"structure"`
- // Specifies whether the versioned object that was permanently deleted was (true)
- // or was not (false) a delete marker.
+ // Indicates whether the specified object version that was permanently deleted
+ // was (true) or was not (false) a delete marker before deletion. In a simple
+ // DELETE, this header indicates whether (true) or not (false) the current version
+ // of the object is a delete marker.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// Returns the version ID of the delete marker created as a result of the DELETE
// operation.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
@@ -17249,27 +19059,30 @@ type DeleteObjectTaggingInput struct {
// The bucket name containing the objects from which to remove the tags.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The key that identifies the object in the bucket from which to remove all
@@ -17416,19 +19229,33 @@ type DeleteObjectsInput struct {
// The bucket name containing the objects to delete.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -17437,22 +19264,38 @@ type DeleteObjectsInput struct {
// Specifies whether you want to delete this object even if it has a Governance-type
// Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention
// permission.
+ //
+ // This functionality is not supported for directory buckets.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
// parameter.
//
- // This checksum algorithm must be the same for all parts and it match the checksum
- // value supplied in the CreateMultipartUpload request.
- //
// The AWS SDK for Go v1 does not support automatically computing the request
// payload checksum. This feature is available in the AWS SDK for Go v2. If
// a value is specified for this parameter, the matching algorithm's checksum member
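A sketch of selecting one of the four listed algorithms on a batch delete. Note the caveat just above: the Go v1 SDK will not compute the payload checksum for you, so this only declares the algorithm (bucket and keys are caller-supplied):

```go
package deletebatch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// newChecksummedDelete builds a quiet batch delete that declares SHA256 via
// the x-amz-sdk-checksum-algorithm header (one of CRC32, CRC32C, SHA1, SHA256).
func newChecksummedDelete(bucket string, keys []string) *s3.DeleteObjectsInput {
	objs := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		objs = append(objs, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	return &s3.DeleteObjectsInput{
		Bucket:            aws.String(bucket),
		Delete:            &s3.Delete{Objects: objs, Quiet: aws.Bool(true)},
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
	}
}
```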
@@ -17468,22 +19311,37 @@ type DeleteObjectsInput struct {
// Delete is a required field
Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The concatenation of the authentication device's serial number, a space,
// and the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
// delete enabled.
+ //
+ // When performing the DeleteObjects operation on an MFA delete enabled bucket,
+ // which attempts to delete the specified versioned objects, you must include
+ // an MFA token. If you don't provide an MFA token, the entire request will
+ // fail, even if there are non-versioned objects that you are trying to delete.
+ // If you provide an invalid token, whether there are versioned object keys
+ // in the request or not, the entire Multi-Object Delete request will fail.
+ // For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
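The x-amz-mfa value is literally the device serial, a space, and the current code. A sketch of assembling it for a versioned batch delete (serial and code come from the caller's MFA device; the helper is hypothetical):

```go
package deletebatch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// withMFA sets x-amz-mfa as "<device serial> <code>" — the concatenation
// the doc comment above describes. For a virtual MFA device the serial is
// its ARN, e.g. arn:aws:iam::123456789012:mfa/admin.
func withMFA(in *s3.DeleteObjectsInput, serial, code string) *s3.DeleteObjectsInput {
	in.MFA = aws.String(fmt.Sprintf("%s %s", serial, code))
	return in
}
```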
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
}
@@ -17618,6 +19476,8 @@ type DeleteObjectsOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
@@ -17665,9 +19525,9 @@ type DeletePublicAccessBlockInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -17777,20 +19637,27 @@ func (s DeletePublicAccessBlockOutput) GoString() string {
type DeletedObject struct {
_ struct{} `type:"structure"`
- // Specifies whether the versioned object that was permanently deleted was (true)
- // or was not (false) a delete marker. In a simple DELETE, this header indicates
- // whether (true) or not (false) a delete marker was created.
+ // Indicates whether the specified object version that was permanently deleted
+ // was (true) or was not (false) a delete marker before deletion. In a simple
+ // DELETE, this header indicates whether (true) or not (false) the current version
+ // of the object is a delete marker.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool `type:"boolean"`
// The version ID of the delete marker created as a result of the DELETE operation.
// If you delete a specific object version, the value returned by this header
// is the version ID of the object version deleted.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarkerVersionId *string `type:"string"`
// The name of the deleted object.
Key *string `min:"1" type:"string"`
// The version ID of the deleted object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `type:"string"`
}
@@ -18506,6 +20373,8 @@ type Error struct {
Message *string `type:"string"`
// The version ID of the error.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `type:"string"`
}
@@ -18677,8 +20546,15 @@ func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplicati
return s
}
-// Specifies the Amazon S3 object key name to filter on and whether to filter
-// on the suffix or prefix of the key name.
+// Specifies the Amazon S3 object key name to filter on. An object key name
+// is the name assigned to an object in your Amazon S3 bucket. You specify whether
+// to filter on the suffix or prefix of the object key name. A prefix is a specific
+// string of characters at the beginning of an object key name, which you can
+// use to organize objects. For example, you can start the key names of related
+// objects with a prefix, such as 2023- or engineering/. Then, you can use FilterRule
+// to find objects in a bucket with key names that have the same prefix. A suffix
+// is similar to a prefix, but it is at the end of the object key name instead
+// of at the beginning.
type FilterRule struct {
_ struct{} `type:"structure"`
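Following the prefix/suffix description, a sketch of a key filter that matches objects under engineering/ ending in .jpg, using the SDK's FilterRuleName constants (the notification wiring around it is omitted):

```go
package notifyfilter

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// engineeringJPEGs matches key names with prefix "engineering/" and
// suffix ".jpg", per the FilterRule semantics described above.
var engineeringJPEGs = &s3.NotificationConfigurationFilter{
	Key: &s3.KeyFilter{
		FilterRules: []*s3.FilterRule{
			{Name: aws.String(s3.FilterRuleNamePrefix), Value: aws.String("engineering/")},
			{Name: aws.String(s3.FilterRuleNameSuffix), Value: aws.String(".jpg")},
		},
	},
}
```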
@@ -18731,16 +20607,20 @@ type GetBucketAccelerateConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
}
@@ -18835,6 +20715,8 @@ type GetBucketAccelerateConfigurationOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// The accelerate configuration of the bucket.
@@ -18876,10 +20758,10 @@ type GetBucketAclInput struct {
// Specifies the S3 bucket whose ACL is being requested.
//
- // To use this API operation against an access point, provide the alias of the
- // access point in place of the bucket name.
+ // When you use this API operation with an access point, provide the alias of
+ // the access point in place of the bucket name.
//
- // To use this API operation against an Object Lambda access point, provide
+ // When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -18888,9 +20770,9 @@ type GetBucketAclInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -19022,9 +20904,9 @@ type GetBucketAnalyticsConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID that identifies the analytics configuration.
@@ -19158,10 +21040,10 @@ type GetBucketCorsInput struct {
// The bucket name for which to get the cors configuration.
//
- // To use this API operation against an access point, provide the alias of the
- // access point in place of the bucket name.
+ // When you use this API operation with an access point, provide the alias of
+ // the access point in place of the bucket name.
//
- // To use this API operation against an Object Lambda access point, provide
+ // When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -19170,9 +21052,9 @@ type GetBucketCorsInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -19297,9 +21179,9 @@ type GetBucketEncryptionInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -19551,9 +21433,9 @@ type GetBucketInventoryConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID used to identify the inventory configuration.
@@ -19690,9 +21572,9 @@ type GetBucketLifecycleConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -19815,9 +21697,9 @@ type GetBucketLifecycleInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -19937,10 +21819,10 @@ type GetBucketLocationInput struct {
// The name of the bucket for which to get the location.
//
- // To use this API operation against an access point, provide the alias of the
- // access point in place of the bucket name.
+ // When you use this API operation with an access point, provide the alias of
+ // the access point in place of the bucket name.
//
- // To use this API operation against an Object Lambda access point, provide
+ // When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -19949,9 +21831,9 @@ type GetBucketLocationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -20076,9 +21958,9 @@ type GetBucketLoggingInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -20204,9 +22086,9 @@ type GetBucketMetricsConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID used to identify the metrics configuration. The ID has a 64 character
@@ -20341,10 +22223,10 @@ type GetBucketNotificationConfigurationRequest struct {
// The name of the bucket for which to get the notification configuration.
//
- // To use this API operation against an access point, provide the alias of the
- // access point in place of the bucket name.
+ // When you use this API operation with an access point, provide the alias of
+ // the access point in place of the bucket name.
//
- // To use this API operation against an Object Lambda access point, provide
+ // When you use this API operation with an Object Lambda access point, provide
// the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
@@ -20353,9 +22235,9 @@ type GetBucketNotificationConfigurationRequest struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -20447,9 +22329,9 @@ type GetBucketOwnershipControlsInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -20568,23 +22450,40 @@ func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipCont
type GetBucketPolicyInput struct {
_ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"`
- // The bucket name for which to get the bucket policy.
+ // The bucket name to get the bucket policy for.
//
- // To use this API operation against an access point, provide the alias of the
- // access point in place of the bucket name.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name
+ // . Virtual-hosted-style requests aren't supported. Directory bucket names
+ // must be unique in the chosen Availability Zone. Bucket names must also follow
+ // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+ // For information about bucket naming restrictions, see Directory bucket naming
+ // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
//
- // To use this API operation against an Object Lambda access point, provide
- // the alias of the Object Lambda access point in place of the bucket name.
- // If the Object Lambda access point alias in a request is not valid, the error
- // code InvalidAccessPointAliasError is returned. For more information about
- // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
+ // Access points - When you use this API operation with an access point, provide
+ // the alias of the access point in place of the bucket name.
+ //
+ // Object Lambda access points - When you use this API operation with an Object
+ // Lambda access point, provide the alias of the Object Lambda access point
+ // in place of the bucket name. If the Object Lambda access point alias in a
+ // request is not valid, the error code InvalidAccessPointAliasError is returned.
+ // For more information about InvalidAccessPointAliasError, see List of Error
+ // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation.
+ // If you specify this header, the request fails with the HTTP status code 501
+ // Not Implemented.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
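A sketch of the alias usage described above: fetching a policy with an access point alias passed in place of the bucket name (the alias is a placeholder; for directory buckets, ExpectedBucketOwner must again be omitted):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2"))))

	// An access point alias is accepted anywhere this input takes a
	// bucket name; an invalid alias returns InvalidAccessPointAliasError.
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("my-ap-hrzrluexample1s3alias"), // placeholder alias
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Policy))
}
```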
@@ -20707,9 +22606,9 @@ type GetBucketPolicyStatusInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -20832,9 +22731,9 @@ type GetBucketReplicationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -20958,9 +22857,9 @@ type GetBucketRequestPaymentInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -21083,9 +22982,9 @@ type GetBucketTaggingInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -21210,9 +23109,9 @@ type GetBucketVersioningInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -21346,9 +23245,9 @@ type GetBucketWebsiteInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -21496,8 +23395,10 @@ type GetObjectAclInput struct {
// The bucket name that contains the object for which to get the ACL information.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
@@ -21506,9 +23407,9 @@ type GetObjectAclInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The key of the object for which to get the ACL information.
@@ -21517,13 +23418,19 @@ type GetObjectAclInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // VersionId used to reference a specific version of the object.
+ // Version ID used to reference a specific version of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -21642,6 +23549,8 @@ type GetObjectAclOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
@@ -21686,27 +23595,41 @@ type GetObjectAttributesInput struct {
// The name of the bucket that contains the object.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The object key.
@@ -21728,13 +23651,19 @@ type GetObjectAttributesInput struct {
PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -21743,6 +23672,8 @@ type GetObjectAttributesInput struct {
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GetObjectAttributesInput's
// String and GoString methods.
@@ -21751,9 +23682,16 @@ type GetObjectAttributesInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// The version ID used to reference a specific version of the object.
+ //
+ // S3 Versioning isn't enabled or supported for directory buckets. For this
+ // API operation, only the null value of the version ID is supported by directory
+ // buckets. You can only specify null for the versionId query parameter in the
+ // request.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
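
A sketch of fetching attributes of an SSE-C encrypted object with the three headers described above. The bucket, key, and 32-byte key material are hypothetical; aws-sdk-go derives SSECustomerKeyMD5 from SSECustomerKey, though it can also be supplied explicitly:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// getAttrsSSEC fetches selected attributes of an SSE-C encrypted object.
func getAttrsSSEC(svc *s3.S3, key []byte) error {
	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket:               aws.String("example-bucket"), // hypothetical
		Key:                  aws.String("example-object"), // hypothetical
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(string(key)), // raw 32-byte key
		ObjectAttributes: []*string{
			aws.String(s3.ObjectAttributesEtag),
			aws.String(s3.ObjectAttributesObjectSize),
			aws.String(s3.ObjectAttributesStorageClass),
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("etag:", aws.StringValue(out.ETag))
	fmt.Println("size:", aws.Int64Value(out.ObjectSize))
	fmt.Println("storage class:", aws.StringValue(out.StorageClass))
	return nil
}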
@@ -21915,6 +23853,8 @@ type GetObjectAttributesOutput struct {
// Specifies whether the object retrieved was (true) or was not (false) a delete
// marker. If false, this response header does not appear in the response.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
// An ETag is an opaque identifier assigned by a web server to a specific version
@@ -21932,15 +23872,22 @@ type GetObjectAttributesOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// Provides the storage class information of the object. Amazon S3 returns this
// header for all objects except for S3 Standard storage class objects.
//
// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
StorageClass *string `type:"string" enum:"StorageClass"`
// The version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
@@ -22038,6 +23985,15 @@ type GetObjectAttributesParts struct {
// A container for elements related to a particular part. A response can contain
// zero or more Parts elements.
+ //
+ // * General purpose buckets - For GetObjectAttributes, if an additional checksum
+ // (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+ // or x-amz-checksum-sha256) isn't applied to the object specified in the
+ // request, the response doesn't return Part.
+ //
+ // * Directory buckets - For GetObjectAttributes, regardless of whether an
+ // additional checksum is applied to the object specified in the request, the
+ // response returns Part.
Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"`
// The total number of parts.
@@ -22103,21 +24059,37 @@ type GetObjectInput struct {
// The bucket name containing the object.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
+ // Object Lambda access points - When you use this action with an Object Lambda
+ // access point, you must direct requests to the Object Lambda access point
+ // hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -22129,25 +24101,50 @@ type GetObjectInput struct {
// validation. This feature is available in the AWS SDK for Go v2.
ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
- // Return the object only if its entity tag (ETag) is the same as the one specified;
- // otherwise, return a 412 (precondition failed) error.
+ // Return the object only if its entity tag (ETag) is the same as the one specified
+ // in this header; otherwise, return a 412 Precondition Failed error.
+ //
+ // If both the If-Match and If-Unmodified-Since headers are present in the
+ // request, and the If-Match condition evaluates to true while the If-Unmodified-Since
+ // condition evaluates to false, S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
// Return the object only if it has been modified since the specified time;
- // otherwise, return a 304 (not modified) error.
+ // otherwise, return a 304 Not Modified error.
+ //
+ // If both the If-None-Match and If-Modified-Since headers are present in the
+ // request, and the If-None-Match condition evaluates to false while the If-Modified-Since
+ // condition evaluates to true, S3 returns a 304 Not Modified status code.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
// Return the object only if its entity tag (ETag) is different from the one
- // specified; otherwise, return a 304 (not modified) error.
+ // specified in this header; otherwise, return a 304 Not Modified error.
+ //
+ // If both the If-None-Match and If-Modified-Since headers are present in the
+ // request, and the If-None-Match condition evaluates to false while the If-Modified-Since
+ // condition evaluates to true, S3 returns a 304 Not Modified HTTP status
+ // code.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
// Return the object only if it has not been modified since the specified time;
- // otherwise, return a 412 (precondition failed) error.
+ // otherwise, return a 412 Precondition Failed error.
+ //
+ // If both the If-Match and If-Unmodified-Since headers are present in the
+ // request, and the If-Match condition evaluates to true while the If-Unmodified-Since
+ // condition evaluates to false, S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
// Key of the object to get.
@@ -22160,7 +24157,7 @@ type GetObjectInput struct {
// Useful for downloading just a part of an object.
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
- // Downloads the specified range bytes of an object. For more information about
+ // Downloads the specified byte range of an object. For more information about
// the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range
// (https://www.rfc-editor.org/rfc/rfc9110.html#name-range).
//
@@ -22168,16 +24165,20 @@ type GetObjectInput struct {
Range *string `location:"header" locationName:"Range" type:"string"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Sets the Cache-Control header of the response.
ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
- // Sets the Content-Disposition header of the response
+ // Sets the Content-Disposition header of the response.
ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
// Sets the Content-Encoding header of the response.
@@ -22192,27 +24193,92 @@ type GetObjectInput struct {
// Sets the Expires header of the response.
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"`
- // Specifies the algorithm to use to when decrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when decrypting the object (for example, AES256).
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when
+ // you GET the object, you must use the following headers:
+ //
+ // * x-amz-server-side-encryption-customer-algorithm
+ //
+ // * x-amz-server-side-encryption-customer-key
+ //
+ // * x-amz-server-side-encryption-customer-key-MD5
+ //
+ // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+ // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
- // Specifies the customer-provided encryption key for Amazon S3 used to encrypt
- // the data. This value is used to decrypt the object when recovering it and
- // must match the one used when storing the data. The key must be appropriate
- // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
- // header.
+ // Specifies the customer-provided encryption key that you originally provided
+ // for Amazon S3 to encrypt the data before storing it. This value is used to
+ // decrypt the object when recovering it and must match the one used when storing
+ // the data. The key must be appropriate for use with the algorithm specified
+ // in the x-amz-server-side-encryption-customer-algorithm header.
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when
+ // you GET the object, you must use the following headers:
+ //
+ // * x-amz-server-side-encryption-customer-algorithm
+ //
+ // * x-amz-server-side-encryption-customer-key
+ //
+ // * x-amz-server-side-encryption-customer-key-MD5
+ //
+ // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+ // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
//
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GetObjectInput's
// String and GoString methods.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
- // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
- // Amazon S3 uses this header for a message integrity check to ensure that the
- // encryption key was transmitted without error.
+ // Specifies the 128-bit MD5 digest of the customer-provided encryption key
+ // according to RFC 1321. Amazon S3 uses this header for a message integrity
+ // check to ensure that the encryption key was transmitted without error.
+ //
+ // If you encrypt an object by using server-side encryption with customer-provided
+ // encryption keys (SSE-C) when you store the object in Amazon S3, then when
+ // you GET the object, you must use the following headers:
+ //
+ // * x-amz-server-side-encryption-customer-algorithm
+ //
+ // * x-amz-server-side-encryption-customer-key
+ //
+ // * x-amz-server-side-encryption-customer-key-MD5
+ //
+ // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+ // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // VersionId used to reference a specific version of the object.
+ // Version ID used to reference a specific version of the object.
+ //
+ // By default, the GetObject operation returns the current version of an object.
+ // To return a different version, use the versionId subresource.
+ //
+ // * If you include a versionId in your request header, you must have the
+ // s3:GetObjectVersion permission to access a specific version of an object.
+ // The s3:GetObject permission is not required in this scenario.
+ //
+ // * If you request the current version of an object without a specific versionId
+ // in the request header, only the s3:GetObject permission is required. The
+ // s3:GetObjectVersion permission is not required in this scenario.
+ //
+ // * Directory buckets - S3 Versioning isn't enabled or supported for directory
+ // buckets. For this API operation, only the null value of the version ID
+ // is supported by directory buckets. You can only specify null for the versionId
+ // query parameter in the request.
+ //
+ // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
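
A conditional range GET illustrating the If-None-Match and Range semantics described above. The bucket, key, and cached ETag are hypothetical; a satisfied If-None-Match surfaces from the SDK as an error whose underlying HTTP status is 304:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Only fetch the first kilobyte if the cached ETag is stale.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:      aws.String("example-bucket"), // hypothetical
		Key:         aws.String("example-object"), // hypothetical
		IfNoneMatch: aws.String(`"cached-etag"`),  // hypothetical ETag
		Range:       aws.String("bytes=0-1023"),
	})
	if err != nil {
		if rf, ok := err.(awserr.RequestFailure); ok && rf.StatusCode() == http.StatusNotModified {
			fmt.Println("cached copy is still current")
			return
		}
		fmt.Println("GetObject failed:", err)
		return
	}
	defer out.Body.Close()
	io.Copy(os.Stdout, out.Body)
}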
@@ -22429,8 +24495,10 @@ type GetObjectLegalHoldInput struct {
// The bucket name containing the object whose legal hold status you want to
// retrieve.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
@@ -22439,9 +24507,9 @@ type GetObjectLegalHoldInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The key name for the object whose legal hold status you want to retrieve.
@@ -22450,10 +24518,14 @@ type GetObjectLegalHoldInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The version ID of the object whose legal hold status you want to retrieve.
@@ -22568,7 +24640,7 @@ type GetObjectLegalHoldOutput struct {
_ struct{} `type:"structure" payload:"LegalHold"`
// The current legal hold status for the specified object.
- LegalHold *ObjectLockLegalHold `type:"structure"`
+ LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure"`
}
// String returns the string representation.
@@ -22600,8 +24672,10 @@ type GetObjectLockConfigurationInput struct {
// The bucket whose Object Lock configuration you want to retrieve.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
@@ -22610,9 +24684,9 @@ type GetObjectLockConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -22730,7 +24804,7 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL
type GetObjectOutput struct {
_ struct{} `type:"structure" payload:"Body"`
- // Indicates that a range of bytes was specified.
+ // Indicates that a range of bytes was specified in the request.
AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
// Object data.
@@ -22738,47 +24812,41 @@ type GetObjectOutput struct {
// Indicates whether the object uses an S3 Bucket Key for server-side encryption
// with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Specifies caching behavior along the request/reply chain.
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. For more information, see
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. For more information, see Checking
+ // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. For more information, see
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
// Specifies presentational information for the object.
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
- // Specifies what content encodings have been applied to the object and thus
+ // Indicates what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field.
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
@@ -22795,23 +24863,40 @@ type GetObjectOutput struct {
// A standard MIME type describing the format of the object data.
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
- // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Indicates whether the object retrieved was (true) or was not (false) a Delete
// Marker. If false, this response header does not appear in the response.
+ //
+ // * If the current version of the object is a delete marker, Amazon S3 behaves
+ // as if the object was deleted and includes x-amz-delete-marker: true in
+ // the response.
+ //
+ // * If the specified version in the request is a delete marker, the response
+ // returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ // response header.
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
// An entity tag (ETag) is an opaque identifier assigned by a web server to
// a specific version of a resource found at a URL.
ETag *string `location:"header" locationName:"ETag" type:"string"`
- // If the object expiration is configured (see PUT Bucket lifecycle), the response
- // includes this header. It includes the expiry-date and rule-id key-value pairs
- // providing object expiration information. The value of the rule-id is URL-encoded.
+ // If the object expiration is configured (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
// The date and time at which the object is no longer cacheable.
Expires *string `location:"header" locationName:"Expires" type:"string"`
- // Creation date of the object.
+ // Date and time when the object was last modified.
+ //
+ // General purpose buckets - When you specify a versionId of the object in your
+ // request, if the specified version in the request is a delete marker, the
+ // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+ // response header.
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
// A map of metadata to store with the object in S3.
@@ -22821,20 +24906,29 @@ type GetObjectOutput struct {
// Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
- // This is set to the number of metadata entries not returned in x-amz-meta
- // headers. This can happen if you create metadata using an API like SOAP that
- // supports more flexible metadata than the REST API. For example, using SOAP,
- // you can create metadata whose values are not legal HTTP headers.
+ // This is set to the number of metadata entries not returned in the headers
+ // that are prefixed with x-amz-meta-. This can happen if you create metadata
+ // using an API like SOAP that supports more flexible metadata than the REST
+ // API. For example, using SOAP, you can create metadata whose values are not
+ // legal HTTP headers.
+ //
+ // This functionality is not supported for directory buckets.
MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
// Indicates whether this object has an active legal hold. This field is only
// returned if you have permission to view an object's legal hold status.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
- // The Object Lock mode currently in place for this object.
+ // The Object Lock mode that's currently in place for this object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when this object's Object Lock will expire.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// The count of parts this object has. This value is only returned if you specify
@@ -22843,51 +24937,80 @@ type GetObjectOutput struct {
// Amazon S3 can return this if your request involves a bucket that is either
// a source or destination in a replication rule.
+ //
+ // This functionality is not supported for directory buckets.
ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// Provides information about object restoration action and expiration time
// of the restored object copy.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GetObjectOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// Provides storage class information of the object. Amazon S3 returns this
// header for all objects except for S3 Standard storage class objects.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
- // The number of tags, if any, on the object.
+ // The number of tags, if any, on the object, when you have the relevant permission
+ // to read object tags.
+ //
+ // You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
+ // to retrieve the tag set associated with an object.
+ //
+ // This functionality is not supported for directory buckets.
TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
- // Version of the object.
+ // Version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
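
A sketch of reading several of the GetObjectOutput fields documented above, with ChecksumMode enabled so the checksum headers are returned when one was stored with the object. Bucket and key are hypothetical placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// inspectObject requests checksum validation and prints response fields.
func inspectObject(svc *s3.S3) error {
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket:       aws.String("example-bucket"), // hypothetical
		Key:          aws.String("example-object"), // hypothetical
		ChecksumMode: aws.String(s3.ChecksumModeEnabled),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	fmt.Println("etag:", aws.StringValue(out.ETag))
	// Checksum headers are only present if one was uploaded with the object.
	fmt.Println("crc32:", aws.StringValue(out.ChecksumCRC32))
	fmt.Println("storage class:", aws.StringValue(out.StorageClass))
	fmt.Println("version id:", aws.StringValue(out.VersionId))
	fmt.Println("tag count:", aws.Int64Value(out.TagCount))
	for k, v := range out.Metadata {
		fmt.Printf("x-amz-meta-%s: %s\n", k, aws.StringValue(v))
	}
	return nil
}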
@@ -23131,8 +25254,10 @@ type GetObjectRetentionInput struct {
// The bucket name containing the object whose retention settings you want to
// retrieve.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
@@ -23141,9 +25266,9 @@ type GetObjectRetentionInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The key name for the object whose retention settings you want to retrieve.
@@ -23152,10 +25277,14 @@ type GetObjectRetentionInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The version ID for the object whose retention settings you want to retrieve.
@@ -23270,7 +25399,7 @@ type GetObjectRetentionOutput struct {
_ struct{} `type:"structure" payload:"Retention"`
// The container element for an object's retention settings.
- Retention *ObjectLockRetention `type:"structure"`
+ Retention *ObjectLockRetention `locationName:"Retention" type:"structure"`
}
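
A sketch of reading an object's Object Lock state via the two operations above. Bucket and key are hypothetical, and the caller is assumed to hold the s3:GetObjectLegalHold and s3:GetObjectRetention permissions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// objectLockState prints the legal hold status and retention settings.
func objectLockState(svc *s3.S3) {
	lh, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-object"), // hypothetical
	})
	if err == nil && lh.LegalHold != nil {
		fmt.Println("legal hold:", aws.StringValue(lh.LegalHold.Status)) // ON or OFF
	}

	ret, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-object"), // hypothetical
	})
	if err == nil && ret.Retention != nil {
		fmt.Println("mode:", aws.StringValue(ret.Retention.Mode)) // GOVERNANCE or COMPLIANCE
		fmt.Println("retain until:", aws.TimeValue(ret.Retention.RetainUntilDate))
	}
}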
// String returns the string representation.
@@ -23302,27 +25431,30 @@ type GetObjectTaggingInput struct {
// The bucket name containing the object for which to get the tagging information.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Object key for which to get the tagging information.
@@ -23331,10 +25463,14 @@ type GetObjectTaggingInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The versionId of the object for which to get the tagging information.
@@ -23496,9 +25632,9 @@ type GetObjectTorrentInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The object key for which to get the information.
@@ -23507,10 +25643,14 @@ type GetObjectTorrentInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
}
@@ -23620,6 +25760,8 @@ type GetObjectTorrentOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
@@ -23662,9 +25804,9 @@ type GetPublicAccessBlockInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -23994,33 +26136,48 @@ type HeadBucketInput struct {
// The bucket name.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with an Object Lambda access point, provide the
- // alias of the Object Lambda access point in place of the bucket name. If the
- // Object Lambda access point alias in a request is not valid, the error code
- // InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError,
- // see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
- //
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Object Lambda access points - When you use this API operation with an Object
+ // Lambda access point, provide the alias of the Object Lambda access point
+ // in place of the bucket name. If the Object Lambda access point alias in a
+ // request is not valid, the error code InvalidAccessPointAliasError is returned.
+ // For more information about InvalidAccessPointAliasError, see List of Error
+ // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList).
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
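
A sketch of the ExpectedBucketOwner check that recurs throughout these inputs: when the supplied account ID doesn't own the bucket, the request fails with HTTP 403. The account ID and bucket name are hypothetical:

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// checkBucketOwner distinguishes an owner mismatch from a missing bucket.
func checkBucketOwner(svc *s3.S3) {
	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket:              aws.String("example-bucket"), // hypothetical
		ExpectedBucketOwner: aws.String("111122223333"),   // hypothetical account ID
	})
	if err == nil {
		fmt.Println("bucket exists and is owned by the expected account")
		return
	}
	if rf, ok := err.(awserr.RequestFailure); ok {
		switch rf.StatusCode() {
		case http.StatusForbidden:
			fmt.Println("owner mismatch or access denied (403)")
		case http.StatusNotFound:
			fmt.Println("bucket does not exist (404)")
		}
	}
}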
@@ -24106,6 +26263,30 @@ func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) {
type HeadBucketOutput struct {
_ struct{} `type:"structure"`
+
+ // Indicates whether the bucket name used in the request is an access point
+ // alias.
+ //
+ // This functionality is not supported for directory buckets.
+ AccessPointAlias *bool `location:"header" locationName:"x-amz-access-point-alias" type:"boolean"`
+
+ // The name of the location where the bucket is created.
+ //
+ // For directory buckets, the AZ ID of the Availability Zone where the bucket
+ // is created. An example AZ ID value is usw2-az1.
+ //
+ // This functionality is only supported by directory buckets.
+ BucketLocationName *string `location:"header" locationName:"x-amz-bucket-location-name" type:"string"`
+
+ // The type of location where the bucket is created.
+ //
+ // This functionality is only supported by directory buckets.
+ BucketLocationType *string `location:"header" locationName:"x-amz-bucket-location-type" type:"string" enum:"LocationType"`
+
+ // The Region where the bucket is located.
+ //
+ // This functionality is not supported for directory buckets.
+ BucketRegion *string `location:"header" locationName:"x-amz-bucket-region" type:"string"`
}
// String returns the string representation.
@@ -24126,24 +26307,62 @@ func (s HeadBucketOutput) GoString() string {
return s.String()
}
+// SetAccessPointAlias sets the AccessPointAlias field's value.
+func (s *HeadBucketOutput) SetAccessPointAlias(v bool) *HeadBucketOutput {
+ s.AccessPointAlias = &v
+ return s
+}
+
+// SetBucketLocationName sets the BucketLocationName field's value.
+func (s *HeadBucketOutput) SetBucketLocationName(v string) *HeadBucketOutput {
+ s.BucketLocationName = &v
+ return s
+}
+
+// SetBucketLocationType sets the BucketLocationType field's value.
+func (s *HeadBucketOutput) SetBucketLocationType(v string) *HeadBucketOutput {
+ s.BucketLocationType = &v
+ return s
+}
+
+// SetBucketRegion sets the BucketRegion field's value.
+func (s *HeadBucketOutput) SetBucketRegion(v string) *HeadBucketOutput {
+ s.BucketRegion = &v
+ return s
+}
+
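A sketch of reading the HeadBucketOutput fields added in this diff. Per the doc comments above, BucketLocationName/BucketLocationType are only set for directory buckets, while BucketRegion and AccessPointAlias apply to general purpose buckets; the bucket name is hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// describeBucketLocation prints the location-related HeadBucket headers.
func describeBucketLocation(svc *s3.S3) {
	out, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical
	})
	if err != nil {
		fmt.Println("HeadBucket failed:", err)
		return
	}
	fmt.Println("region:", aws.StringValue(out.BucketRegion))
	fmt.Println("location name:", aws.StringValue(out.BucketLocationName))
	fmt.Println("location type:", aws.StringValue(out.BucketLocationType))
	fmt.Println("access point alias:", aws.BoolValue(out.AccessPointAlias))
}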
type HeadObjectInput struct {
_ struct{} `locationName:"HeadObjectRequest" type:"structure"`
- // The name of the bucket containing the object.
+ // The name of the bucket that contains the object.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -24156,25 +26375,69 @@ type HeadObjectInput struct {
// must have permission to use the kms:Decrypt action for the request to succeed.
ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Return the object only if its entity tag (ETag) is the same as the one specified;
// otherwise, return a 412 (precondition failed) error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
+ //    * If-Match condition evaluates to true, and
+ //
+ //    * If-Unmodified-Since condition evaluates to false,
+ //
+ // then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
// Return the object only if it has been modified since the specified time;
// otherwise, return a 304 (not modified) error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in
+ // the request as follows:
+ //
+ //    * If-None-Match condition evaluates to false, and
+ //
+ //    * If-Modified-Since condition evaluates to true,
+ //
+ // then Amazon S3 returns the 304 Not Modified response code.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
// Return the object only if its entity tag (ETag) is different from the one
// specified; otherwise, return a 304 (not modified) error.
+ //
+ // If both of the If-None-Match and If-Modified-Since headers are present in
+ // the request as follows:
+ //
+ //    * If-None-Match condition evaluates to false, and
+ //
+ //    * If-Modified-Since condition evaluates to true,
+ //
+ // then Amazon S3 returns the 304 Not Modified response code.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
// Return the object only if it has not been modified since the specified time;
// otherwise, return a 412 (precondition failed) error.
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
+ //    * If-Match condition evaluates to true, and
+ //
+ //    * If-Unmodified-Since condition evaluates to false,
+ //
+ // then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
// The object key.
@@ -24194,14 +26457,37 @@ type HeadObjectInput struct {
Range *string `location:"header" locationName:"Range" type:"string"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -24210,6 +26496,8 @@ type HeadObjectInput struct {
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by HeadObjectInput's
// String and GoString methods.
@@ -24218,9 +26506,14 @@ type HeadObjectInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // VersionId used to reference a specific version of the object.
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
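+
+// Example (editor's illustrative sketch): a conditional HeadObject using
+// If-None-Match for ETag-based revalidation. svc, bucket, key, and etag are
+// assumed; with this SDK an HTTP 304 typically surfaces as a request error
+// rather than a populated output.
+//
+//	_, err := svc.HeadObject(&s3.HeadObjectInput{
+//	    Bucket:      aws.String("my-bucket"),
+//	    Key:         aws.String("data.bin"),
+//	    IfNoneMatch: aws.String(etag),
+//	})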
@@ -24337,6 +26630,42 @@ func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
return s
}
+// SetResponseCacheControl sets the ResponseCacheControl field's value.
+func (s *HeadObjectInput) SetResponseCacheControl(v string) *HeadObjectInput {
+ s.ResponseCacheControl = &v
+ return s
+}
+
+// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
+func (s *HeadObjectInput) SetResponseContentDisposition(v string) *HeadObjectInput {
+ s.ResponseContentDisposition = &v
+ return s
+}
+
+// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
+func (s *HeadObjectInput) SetResponseContentEncoding(v string) *HeadObjectInput {
+ s.ResponseContentEncoding = &v
+ return s
+}
+
+// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
+func (s *HeadObjectInput) SetResponseContentLanguage(v string) *HeadObjectInput {
+ s.ResponseContentLanguage = &v
+ return s
+}
+
+// SetResponseContentType sets the ResponseContentType field's value.
+func (s *HeadObjectInput) SetResponseContentType(v string) *HeadObjectInput {
+ s.ResponseContentType = &v
+ return s
+}
+
+// SetResponseExpires sets the ResponseExpires field's value.
+func (s *HeadObjectInput) SetResponseExpires(v time.Time) *HeadObjectInput {
+ s.ResponseExpires = &v
+ return s
+}
+
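+// Example (editor's illustrative sketch): setting the new Response* query
+// parameters to override response headers. The bucket and key are hypothetical.
+//
+//	out, err := svc.HeadObject(&s3.HeadObjectInput{
+//	    Bucket:              aws.String("my-bucket"),
+//	    Key:                 aws.String("docs/report.pdf"),
+//	    ResponseContentType: aws.String("application/pdf"),
+//	})
+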
// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
s.SSECustomerAlgorithm = &v
@@ -24402,51 +26731,63 @@ type HeadObjectOutput struct {
AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
// The archive state of the head object.
+ //
+ // This functionality is not supported for directory buckets.
ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"`
// Indicates whether the object uses an S3 Bucket Key for server-side encryption
// with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Specifies caching behavior along the request/reply chain.
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
// Specifies presentational information for the object.
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
- // Specifies what content encodings have been applied to the object and thus
+ // Indicates what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field.
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
@@ -24462,21 +26803,27 @@ type HeadObjectOutput struct {
// Specifies whether the object retrieved was (true) or was not (false) a Delete
// Marker. If false, this response header does not appear in the response.
+ //
+ // This functionality is not supported for directory buckets.
DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
// An entity tag (ETag) is an opaque identifier assigned by a web server to
// a specific version of a resource found at a URL.
ETag *string `location:"header" locationName:"ETag" type:"string"`
- // If the object expiration is configured (see PUT Bucket lifecycle), the response
- // includes this header. It includes the expiry-date and rule-id key-value pairs
- // providing object expiration information. The value of the rule-id is URL-encoded.
+ // If the object expiration is configured (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
// The date and time at which the object is no longer cacheable.
Expires *string `location:"header" locationName:"Expires" type:"string"`
- // Creation date of the object.
+ // Date and time when the object was last modified.
LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
// A map of metadata to store with the object in S3.
@@ -24490,6 +26837,8 @@ type HeadObjectOutput struct {
// headers. This can happen if you create metadata using an API like SOAP that
// supports more flexible metadata than the REST API. For example, using SOAP,
// you can create metadata whose values are not legal HTTP headers.
+ //
+ // This functionality is not supported for directory buckets.
MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
// Specifies whether a legal hold is in effect for this object. This header
@@ -24497,15 +26846,21 @@ type HeadObjectOutput struct {
// This header is not returned if the specified version of this object has never
// had a legal hold applied. For more information about S3 Object Lock, see
// Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// The Object Lock mode, if any, that's in effect for this object. This header
// is only returned if the requester has the s3:GetObjectRetention permission.
// For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when the Object Lock retention period expires. This header
// is only returned if the requester has the s3:GetObjectRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// The count of parts this object has. This value is only returned if you specify
@@ -24544,10 +26899,14 @@ type HeadObjectOutput struct {
// header will return FAILED.
//
// For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ //
+ // This functionality is not supported for directory buckets.
ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If the object is an archived object (an object whose storage class is GLACIER),
@@ -24565,42 +26924,61 @@ type HeadObjectOutput struct {
//
// For more information about archiving objects, see Transitioning Objects:
// General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by HeadObjectOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// Provides storage class information of the object. Amazon S3 returns this
// header for all objects except for S3 Standard storage class objects.
//
// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
- // Version of the object.
+ // Version ID of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
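+
+// Example (editor's illustrative sketch): the x-amz-checksum-* headers above
+// are only returned when ChecksumMode is enabled on the request. svc, bucket,
+// and key are assumptions.
+//
+//	out, err := svc.HeadObject(&s3.HeadObjectInput{
+//	    Bucket:       aws.String("my-bucket"),
+//	    Key:          aws.String("data.bin"),
+//	    ChecksumMode: aws.String(s3.ChecksumModeEnabled),
+//	})
+//	if err == nil && out.ChecksumSHA256 != nil {
+//	    fmt.Println("sha256:", *out.ChecksumSHA256)
+//	}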
@@ -24831,9 +27209,9 @@ type IndexDocument struct {
_ struct{} `type:"structure"`
// A suffix that is appended to a request that is for a directory on the website
- // endpoint (for example,if the suffix is index.html and you make a request
- // to samplebucket/images/ the data that is returned will be for the object
- // with the key name images/index.html) The suffix must not be empty and must
+ // endpoint. (For example, if the suffix is index.html and you make a request
+ // to samplebucket/images/, the data that is returned will be for the object
+ // with the key name images/index.html.) The suffix must not be empty and must
// not include a slash character.
//
// Replacement must be made for object keys containing special characters (such
@@ -24886,10 +27264,16 @@ type Initiator struct {
_ struct{} `type:"structure"`
// Name of the Principal.
+ //
+ // This functionality is not supported for directory buckets.
DisplayName *string `type:"string"`
// If the principal is an Amazon Web Services account, it provides the Canonical
// User ID. If the principal is an IAM User, it provides a user ARN value.
+ //
+ // Directory buckets - If the principal is an Amazon Web Services account, it
+ // provides the Amazon Web Services account ID. If the principal is an IAM User,
+ // it provides a user ARN value.
ID *string `type:"string"`
}
@@ -26219,7 +28603,9 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
}
// The Filter is used to identify objects that a Lifecycle Rule applies to.
-// A Filter must have exactly one of Prefix, Tag, or And specified.
+// A Filter can have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan,
+// or And specified. If the Filter element is left empty, the Lifecycle Rule
+// applies to all objects in the bucket.
type LifecycleRuleFilter struct {
_ struct{} `type:"structure"`
@@ -26325,9 +28711,9 @@ type ListBucketAnalyticsConfigurationsInput struct {
// should begin.
ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -26651,9 +29037,9 @@ type ListBucketInventoryConfigurationsInput struct {
// that Amazon S3 understands.
ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -26820,9 +29206,9 @@ type ListBucketMetricsConfigurationsInput struct {
// value that Amazon S3 understands.
ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -27038,24 +29424,123 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
return s
}
+type ListDirectoryBucketsInput struct {
+ _ struct{} `locationName:"ListDirectoryBucketsRequest" type:"structure"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued
+ // on this bucket with a token. ContinuationToken is obfuscated and is not a
+ // real key. You can use this ContinuationToken for pagination of the list results.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // Maximum number of buckets to be returned in the response. When the number
+ // is more than the count of buckets that are owned by an Amazon Web Services
+ // account, all the buckets owned by the account are returned in the response.
+ MaxDirectoryBuckets *int64 `location:"querystring" locationName:"max-directory-buckets" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDirectoryBucketsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDirectoryBucketsInput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListDirectoryBucketsInput) SetContinuationToken(v string) *ListDirectoryBucketsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetMaxDirectoryBuckets sets the MaxDirectoryBuckets field's value.
+func (s *ListDirectoryBucketsInput) SetMaxDirectoryBuckets(v int64) *ListDirectoryBucketsInput {
+ s.MaxDirectoryBuckets = &v
+ return s
+}
+
+type ListDirectoryBucketsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of buckets owned by the requester.
+ Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+ // If ContinuationToken was sent with the request, it is included in the response.
+ // You can use the returned ContinuationToken for pagination of the list response.
+ ContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDirectoryBucketsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ListDirectoryBucketsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListDirectoryBucketsOutput) SetBuckets(v []*Bucket) *ListDirectoryBucketsOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListDirectoryBucketsOutput) SetContinuationToken(v string) *ListDirectoryBucketsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
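+// Example (editor's illustrative sketch): paginating ListDirectoryBuckets with
+// the obfuscated continuation token, per the field docs above. svc is an
+// assumed *s3.S3 client.
+//
+//	in := &s3.ListDirectoryBucketsInput{MaxDirectoryBuckets: aws.Int64(100)}
+//	for {
+//	    out, err := svc.ListDirectoryBuckets(in)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    for _, b := range out.Buckets {
+//	        fmt.Println(*b.Name)
+//	    }
+//	    if out.ContinuationToken == nil {
+//	        break
+//	    }
+//	    in.ContinuationToken = out.ContinuationToken
+//	}
+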
type ListMultipartUploadsInput struct {
_ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"`
// The name of the bucket to which the multipart upload was initiated.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -27069,6 +29554,8 @@ type ListMultipartUploadsInput struct {
// parameter, then the substring starts at the beginning of the key. The keys
// that are grouped under CommonPrefixes result element are not returned elsewhere
// in the response.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
// Requests Amazon S3 to encode the object keys in the response and specifies
@@ -27079,20 +29566,28 @@ type ListMultipartUploadsInput struct {
// keys in the response.
EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
- // Together with upload-id-marker, this parameter specifies the multipart upload
- // after which listing should begin.
- //
- // If upload-id-marker is not specified, only the keys lexicographically greater
- // than the specified key-marker will be included in the list.
- //
- // If upload-id-marker is specified, any multipart uploads for a key equal to
- // the key-marker might also be included, provided those multipart uploads have
- // upload IDs lexicographically greater than the specified upload-id-marker.
+ // Specifies the multipart upload after which listing should begin.
+ //
+ // * General purpose buckets - For general purpose buckets, key-marker is
+ // an object key. Together with upload-id-marker, this parameter specifies
+ // the multipart upload after which listing should begin. If upload-id-marker
+ // is not specified, only the keys lexicographically greater than the specified
+ // key-marker will be included in the list. If upload-id-marker is specified,
+ // any multipart uploads for a key equal to the key-marker might also be
+ // included, provided those multipart uploads have upload IDs lexicographically
+ // greater than the specified upload-id-marker.
+ //
+ // * Directory buckets - For directory buckets, key-marker is obfuscated
+ // and isn't a real object key. The upload-id-marker parameter isn't supported
+ // by directory buckets. To list the additional multipart uploads, you only
+ // need to set the value of key-marker to the NextKeyMarker value from the
+ // previous response. In the ListMultipartUploads response, the multipart
+ // uploads aren't sorted lexicographically based on the object keys.
KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
// Sets the maximum number of multipart uploads, from 1 to 1,000, to return
@@ -27104,13 +29599,20 @@ type ListMultipartUploadsInput struct {
// prefix. You can use prefixes to separate a bucket into different grouping
// of keys. (You can think of using prefix to make groups in the same way that
// you'd use a folder in a file system.)
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a delimiter
+ // (/) are supported.
Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Together with key-marker, specifies the multipart upload after which listing
@@ -27118,6 +29620,8 @@ type ListMultipartUploadsInput struct {
// is ignored. Otherwise, any multipart uploads for a key equal to the key-marker
// might be included in the list only if they have an upload ID lexicographically
// greater than the specified upload-id-marker.
+ //
+ // This functionality is not supported for directory buckets.
UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
}
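+
+// Example (editor's illustrative sketch): paging in-progress multipart uploads
+// on a directory bucket. Per the KeyMarker docs above, upload-id-marker is not
+// supported there, so key-marker is set from the previous NextKeyMarker. The
+// bucket name is hypothetical.
+//
+//	in := &s3.ListMultipartUploadsInput{
+//	    Bucket: aws.String("my-bucket--usw2-az1--x-s3"),
+//	}
+//	for {
+//	    out, err := svc.ListMultipartUploads(in)
+//	    if err != nil {
+//	        return err
+//	    }
+//	    if out.IsTruncated == nil || !*out.IsTruncated {
+//	        break
+//	    }
+//	    in.KeyMarker = out.NextKeyMarker
+//	}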
@@ -27253,10 +29757,15 @@ type ListMultipartUploadsOutput struct {
// If you specify a delimiter in the request, then the result returns each distinct
// key prefix containing the delimiter in a CommonPrefixes element. The distinct
// key prefixes are returned in the Prefix child element.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a delimiter
+ // (/) are supported.
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
// Contains the delimiter you specified in the request. If you don't specify
// a delimiter in your request, this element is absent from the response.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
Delimiter *string `type:"string"`
// Encoding type used by Amazon S3 to encode object keys in the response.
@@ -27287,17 +29796,30 @@ type ListMultipartUploadsOutput struct {
// When a list is truncated, this element specifies the value that should be
// used for the upload-id-marker request parameter in a subsequent request.
+ //
+ // This functionality is not supported for directory buckets.
NextUploadIdMarker *string `type:"string"`
// When a prefix is provided in the request, this field contains the specified
// prefix. The result contains only keys starting with the specified prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a delimiter
+ // (/) are supported.
Prefix *string `type:"string"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
- // Upload ID after which listing began.
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker
+ // might be included in the list only if they have an upload ID lexicographically
+ // greater than the specified upload-id-marker.
+ //
+ // This functionality is not supported for directory buckets.
UploadIdMarker *string `type:"string"`
// Container for elements related to a particular multipart upload. A response
@@ -27431,9 +29953,9 @@ type ListObjectVersionsInput struct {
// keys in the response.
EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Specifies the key to start with when listing objects in a bucket.
@@ -27459,10 +29981,14 @@ type ListObjectVersionsInput struct {
Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Specifies the object version you want to start listing from.
@@ -27656,6 +30182,8 @@ type ListObjectVersionsOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// Marks the last version of the key returned in a truncated response.
@@ -27772,19 +30300,33 @@ type ListObjectsInput struct {
// The name of the bucket containing the objects.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -27801,9 +30343,9 @@ type ListObjectsInput struct {
// keys in the response.
EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
@@ -27980,7 +30522,9 @@ type ListObjectsOutput struct {
// the MaxKeys value.
Delimiter *string `type:"string"`
- // Encoding type used by Amazon S3 to encode object keys in the response.
+ // Encoding type used by Amazon S3 to encode object keys in the response. If
+ // using url, non-ASCII characters used in an object's key name will be URL
+ // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
EncodingType *string `type:"string" enum:"EncodingType"`
// A flag that indicates whether Amazon S3 returned all of the results that
@@ -28014,6 +30558,8 @@ type ListObjectsOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
@@ -28104,21 +30650,33 @@ func (s *ListObjectsOutput) SetRequestCharged(v string) *ListObjectsOutput {
type ListObjectsV2Input struct {
_ struct{} `locationName:"ListObjectsV2Request" type:"structure"`
- // Bucket name to list.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -28126,23 +30684,37 @@ type ListObjectsV2Input struct {
// ContinuationToken indicates to Amazon S3 that the list is being continued
// on this bucket with a token. ContinuationToken is obfuscated and is not a
- // real key.
+ // real key. You can use this ContinuationToken for pagination of the list results.
ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
// A delimiter is a character that you use to group keys.
+ //
+ // * Directory buckets - For directory buckets, / is the only supported delimiter.
+ //
+ // * Directory buckets - When you query ListObjectsV2 with a delimiter during
+ // in-progress multipart uploads, the CommonPrefixes response parameter contains
+ // the prefixes that are associated with the in-progress multipart uploads.
+ // For more information about multipart uploads, see Multipart Upload Overview
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in
+ // the Amazon S3 User Guide.
Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
- // Encoding type used by Amazon S3 to encode object keys in the response.
+ // Encoding type used by Amazon S3 to encode object keys in the response. If
+ // using url, non-ASCII characters used in an object's key name will be URL
+ // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The owner field is not present in ListObjectsV2 by default. If you want to
// return the owner field with each key in the result, then set the FetchOwner
// field to true.
+ //
+ // Directory buckets - For directory buckets, the bucket owner is returned as
+ // the object owner for all objects.
FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
// Sets the maximum number of keys returned in the response. By default, the
@@ -28152,18 +30724,27 @@ type ListObjectsV2Input struct {
// Specifies the optional fields that you want returned in the response. Fields
// that you do not specify are not returned.
+ //
+ // This functionality is not supported for directory buckets.
OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"`
// Limits the response to keys that begin with the specified prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a delimiter
+ // (/) are supported.
Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
// Confirms that the requester knows that she or he will be charged for the
// list objects request in V2 style. Bucket owners need not specify this parameter
// in their requests.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
// listing after this specified key. StartAfter can be any key in the bucket.
+ //
+ // This functionality is not supported for directory buckets.
StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
}
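+
+// Example (editor's illustrative sketch): listing keys under a prefix with the
+// SDK's built-in paginator. The bucket and prefix are hypothetical.
+//
+//	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
+//	    Bucket: aws.String("my-bucket"),
+//	    Prefix: aws.String("logs/"),
+//	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//	    for _, obj := range page.Contents {
+//	        fmt.Println(*obj.Key)
+//	    }
+//	    return true // continue to the next page
+//	})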
@@ -28304,8 +30885,9 @@ func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) {
type ListObjectsV2Output struct {
_ struct{} `type:"structure"`
- // All of the keys (up to 1,000) rolled up into a common prefix count as a single
- // return when calculating the number of returns.
+ // All of the keys (up to 1,000) that share the same prefix are grouped together.
+ // When counting the total number of returns by this API operation, this group
+ // of keys is counted as one item.
//
// A response can contain CommonPrefixes only if you specify a delimiter.
//
@@ -28319,12 +30901,24 @@ type ListObjectsV2Output struct {
// in notes/summer/july, the common prefix is notes/summer/. All of the keys
// that roll up into a common prefix count as a single return when calculating
// the number of returns.
+ //
+ // * Directory buckets - For directory buckets, only prefixes that end in
+ // a delimiter (/) are supported.
+ //
+ // * Directory buckets - When you query ListObjectsV2 with a delimiter during
+ // in-progress multipart uploads, the CommonPrefixes response parameter contains
+ // the prefixes that are associated with the in-progress multipart uploads.
+ // For more information about multipart uploads, see Multipart Upload Overview
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in
+ // the Amazon S3 User Guide.
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
// Metadata about each object returned.
Contents []*Object `type:"list" flattened:"true"`
// If ContinuationToken was sent with the request, it is included in the response.
+ // You can use the returned ContinuationToken for pagination of the list results.
ContinuationToken *string `type:"string"`
// Causes keys that contain the same string between the prefix and the first
@@ -28332,6 +30926,8 @@ type ListObjectsV2Output struct {
// in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere
// in the response. Each rolled-up result counts as only one return against
// the MaxKeys value.
+ //
+ // Directory buckets - For directory buckets, / is the only supported delimiter.
Delimiter *string `type:"string"`
// Encoding type used by Amazon S3 to encode object key names in the XML response.
@@ -28359,21 +30955,6 @@ type ListObjectsV2Output struct {
MaxKeys *int64 `type:"integer"`
// The bucket name.
- //
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
- //
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
Name *string `type:"string"`
// NextContinuationToken is sent when isTruncated is true, which means there
@@ -28383,13 +30964,20 @@ type ListObjectsV2Output struct {
NextContinuationToken *string `type:"string"`
// Keys that begin with the indicated prefix.
+ //
+ // Directory buckets - For directory buckets, only prefixes that end in a delimiter
+ // (/) are supported.
Prefix *string `type:"string"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If StartAfter was sent with the request, it is included in the response.
+ //
+ // This functionality is not supported for directory buckets.
StartAfter *string `type:"string"`
}
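A short sketch of the pagination flow these ContinuationToken fields enable (illustrative only; svc and imports as in the earlier sketch):

	// ListObjectsV2Pages feeds NextContinuationToken back into ContinuationToken
	// between pages until IsTruncated is false.
	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"),
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key), aws.Int64Value(obj.Size))
		}
		return !lastPage // keep paging
	})
	if err != nil {
		panic(err)
	}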
@@ -28494,27 +31082,41 @@ type ListPartsInput struct {
// The name of the bucket to which the parts are being uploaded.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Object key for which the multipart upload was initiated.
@@ -28530,16 +31132,22 @@ type ListPartsInput struct {
PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is needed only when the object was created using a checksum algorithm.
// For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// The server-side encryption (SSE) customer managed key. This parameter is
@@ -28547,6 +31155,8 @@ type ListPartsInput struct {
// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListPartsInput's
// String and GoString methods.
@@ -28556,6 +31166,8 @@ type ListPartsInput struct {
// is needed only when the object was created using a checksum algorithm. For
// more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Upload ID identifying the multipart upload whose parts are being listed.
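A minimal sketch of calling ListParts with the fields above (illustrative only; svc as in the earlier sketch, and the upload ID placeholder would come from CreateMultipartUpload):

	out, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("big-object.bin"),
		UploadId: aws.String("example-upload-id"), // from CreateMultipartUpload
		MaxParts: aws.Int64(100),
	})
	if err == nil {
		fmt.Println("parts listed:", len(out.Parts))
	}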
@@ -28720,11 +31332,15 @@ type ListPartsOutput struct {
//
// The response will also include the x-amz-abort-rule-id header that will provide
// the ID of the lifecycle configuration rule that defines this action.
+ //
+ // This functionality is not supported for directory buckets.
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
// This header is returned along with the x-amz-abort-date header. It identifies
// the applicable lifecycle configuration rule that defines the action to abort
// incomplete multipart uploads.
+ //
+ // This functionality is not supported for directory buckets.
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
// The name of the bucket to which the multipart upload was initiated. Does
@@ -28759,11 +31375,13 @@ type ListPartsOutput struct {
// Container element that identifies the object owner, after the object is created.
// If a multipart upload is initiated by an IAM user, this element provides the
// parent account ID and display name.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for
+ // all the parts.
Owner *Owner `type:"structure"`
- // When a list is truncated, this element specifies the last part in the list,
- // as well as the value to use for the part-number-marker request parameter
- // in a subsequent request.
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
PartNumberMarker *int64 `type:"integer"`
// Container for elements related to a particular part. A response can contain
@@ -28772,10 +31390,14 @@ type ListPartsOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
- // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded
- // object.
+ // The class of storage used to store the uploaded object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
StorageClass *string `type:"string" enum:"StorageClass"`
// Upload ID identifying the multipart upload whose parts are being listed.
@@ -29033,6 +31655,56 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location {
return s
}
+// Specifies the location where the bucket will be created.
+//
+// For directory buckets, the location type is Availability Zone. For more information
+// about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+type LocationInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the location where the bucket will be created.
+ //
+ // For directory buckets, the name of the location is the AZ ID of the Availability
+ // Zone where the bucket will be created. An example AZ ID value is usw2-az1.
+ Name *string `type:"string"`
+
+ // The type of location where the bucket will be created.
+ Type *string `type:"string" enum:"LocationType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LocationInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LocationInfo) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *LocationInfo) SetName(v string) *LocationInfo {
+ s.Name = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *LocationInfo) SetType(v string) *LocationInfo {
+ s.Type = &v
+ return s
+}
+
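LocationInfo is consumed through CreateBucketConfiguration when creating a directory bucket. A hedged sketch (illustrative only; the BucketInfo fields and enum constants are assumed to come from the same directory-bucket additions in this SDK version, and all names are placeholders):

	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example--usw2-az1--x-s3"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			Location: &s3.LocationInfo{
				Name: aws.String("usw2-az1"), // AZ ID
				Type: aws.String(s3.LocationTypeAvailabilityZone),
			},
			Bucket: &s3.BucketInfo{
				DataRedundancy: aws.String(s3.DataRedundancySingleAvailabilityZone),
				Type:           aws.String(s3.BucketTypeDirectory),
			},
		},
	})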
// Describes where logs are stored and the prefix that Amazon S3 assigns to
// all log object keys for a bucket. For more information, see PUT Bucket logging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
@@ -29058,6 +31730,9 @@ type LoggingEnabled struct {
// in the Amazon S3 User Guide.
TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+ // Amazon S3 key format for log objects.
+ TargetObjectKeyFormat *TargetObjectKeyFormat `type:"structure"`
+
// A prefix for all log object keys. If you store log files from multiple Amazon
// S3 buckets in a single bucket, you can use a prefix to distinguish which
// log files came from which bucket.
@@ -29122,6 +31797,12 @@ func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
return s
}
+// SetTargetObjectKeyFormat sets the TargetObjectKeyFormat field's value.
+func (s *LoggingEnabled) SetTargetObjectKeyFormat(v *TargetObjectKeyFormat) *LoggingEnabled {
+ s.TargetObjectKeyFormat = v
+ return s
+}
+
// SetTargetPrefix sets the TargetPrefix field's value.
func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
s.TargetPrefix = &v
@@ -29469,9 +32150,15 @@ type MultipartUpload struct {
Key *string `min:"1" type:"string"`
// Specifies the owner of the object that is part of the multipart upload.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner for
+ // all the objects.
Owner *Owner `type:"structure"`
// The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
StorageClass *string `type:"string" enum:"StorageClass"`
// Upload ID that identifies the multipart upload.
@@ -29546,9 +32233,10 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
type NoncurrentVersionExpiration struct {
_ struct{} `type:"structure"`
- // Specifies how many noncurrent versions Amazon S3 will retain. If there are
- // this many more recent noncurrent versions, Amazon S3 will take the associated
- // action. For more information about noncurrent versions, see Lifecycle configuration
+ // Specifies how many noncurrent versions Amazon S3 will retain. You can specify
+ // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete
+ // any additional noncurrent versions beyond the specified number to retain.
+ // For more information about noncurrent versions, see Lifecycle configuration
// elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
// in the Amazon S3 User Guide.
NewerNoncurrentVersions *int64 `type:"integer"`
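A sketch of wiring NewerNoncurrentVersions into a lifecycle rule (illustrative only; bucket and prefix are placeholders):

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("expire-old-noncurrent"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
					NoncurrentDays:          aws.Int64(30),
					NewerNoncurrentVersions: aws.Int64(5), // retain at most 5 newer noncurrent versions
				},
			}},
		},
	})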
@@ -29601,10 +32289,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers
type NoncurrentVersionTransition struct {
_ struct{} `type:"structure"`
- // Specifies how many noncurrent versions Amazon S3 will retain. If there are
- // this many more recent noncurrent versions, Amazon S3 will take the associated
- // action. For more information about noncurrent versions, see Lifecycle configuration
- // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
+ // Specifies how many noncurrent versions Amazon S3 will retain in the same
+ // storage class before transitioning objects. You can specify up to 100 noncurrent
+ // versions to retain. Amazon S3 will transition any additional noncurrent versions
+ // beyond the specified number to retain. For more information about noncurrent
+ // versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
// in the Amazon S3 User Guide.
NewerNoncurrentVersions *int64 `type:"integer"`
@@ -29873,6 +32562,8 @@ type Object struct {
// encryption. If an object is larger than 16 MB, the Amazon Web Services
// Management Console will upload or copy that object as a Multipart Upload,
// and therefore the ETag will not be an MD5 digest.
+ //
+ // Directory buckets - MD5 is not supported by directory buckets.
ETag *string `type:"string"`
// The name that you assign to an object. You use the object key to retrieve
@@ -29883,6 +32574,8 @@ type Object struct {
LastModified *time.Time `type:"timestamp"`
// The owner of the object.
+ //
+ // Directory buckets - The bucket owner is returned as the object owner.
Owner *Owner `type:"structure"`
// Specifies the restoration status of an object. Objects in certain storage
@@ -29890,12 +32583,18 @@ type Object struct {
// about these storage classes and how to work with archived objects, see Working
// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Only the S3 Express
+ // One Zone storage class is supported by directory buckets to store objects.
RestoreStatus *RestoreStatus `type:"structure"`
// Size in bytes of the object.
- Size *int64 `type:"integer"`
+ Size *int64 `type:"long"`
// The class of storage used to store the object.
+ //
+ // Directory buckets - Only the S3 Express One Zone storage class is supported
+ // by directory buckets to store objects.
StorageClass *string `type:"string" enum:"ObjectStorageClass"`
}
@@ -29978,7 +32677,9 @@ type ObjectIdentifier struct {
// Key is a required field
Key *string `min:"1" type:"string" required:"true"`
- // VersionId for the specific version of the object to delete.
+ // Version ID for the specific version of the object to delete.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `type:"string"`
}
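ObjectIdentifier is the element of a batched delete. A sketch (illustrative only; the key and version ID are placeholders):

	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{{
				Key:       aws.String("reports/2023.csv"),
				VersionId: aws.String("example-version-id"), // omit to remove the current version
			}},
			Quiet: aws.Bool(true), // report only failures in the response
		},
	})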
@@ -30193,26 +32894,32 @@ type ObjectPart struct {
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use the API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `type:"string"`
@@ -30221,7 +32928,7 @@ type ObjectPart struct {
PartNumber *int64 `type:"integer"`
// The size of the uploaded part in bytes.
- Size *int64 `type:"integer"`
+ Size *int64 `type:"long"`
}
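ObjectPart values are surfaced by GetObjectAttributes when the ObjectParts attribute is requested. A sketch (illustrative only; names are placeholders):

	attrs, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
		Bucket:           aws.String("example-bucket"),
		Key:              aws.String("big-object.bin"),
		ObjectAttributes: aws.StringSlice([]string{s3.ObjectAttributesObjectParts}),
	})
	if err == nil && attrs.ObjectParts != nil {
		for _, p := range attrs.ObjectParts.Parts {
			// Per-part checksums, not a digest of the whole object.
			fmt.Println(aws.Int64Value(p.PartNumber), aws.StringValue(p.ChecksumSHA256))
		}
	}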
// String returns the string representation.
@@ -30295,7 +33002,7 @@ type ObjectVersion struct {
// The object key.
Key *string `min:"1" type:"string"`
- // Date and time the object was last modified.
+ // Date and time when the object was last modified.
LastModified *time.Time `type:"timestamp"`
// Specifies the owner of the object.
@@ -30309,7 +33016,7 @@ type ObjectVersion struct {
RestoreStatus *RestoreStatus `type:"structure"`
// Size in bytes of the object.
- Size *int64 `type:"integer"`
+ Size *int64 `type:"long"`
// The class of storage used to store the object.
StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
@@ -30506,6 +33213,8 @@ type Owner struct {
// * Europe (Ireland)
//
// * South America (São Paulo)
+ //
+ // This functionality is not supported for directory buckets.
DisplayName *string `type:"string"`
// Container for the ID of the owner.
@@ -30615,8 +33324,19 @@ type OwnershipControlsRule struct {
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that
- // don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control
- // canned ACL or an equivalent form of this ACL expressed in the XML format.
+ // don't specify an ACL or specify bucket owner full control ACLs (such as the
+ // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format
+ // that grants the same permissions).
+ //
+ // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled.
+ // We recommend keeping ACLs disabled, except in uncommon use cases where you
+ // must control access for each object individually. For more information about
+ // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs
+ // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets. Directory buckets
+ // use the bucket owner enforced setting for S3 Object Ownership.
//
// ObjectOwnership is a required field
ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"`
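A sketch of applying the default, recommended setting described above (illustrative only; the bucket name is a placeholder):

	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"),
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{{
				ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
			}},
		},
	})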
@@ -30694,18 +33414,22 @@ type Part struct {
ChecksumCRC32 *string `type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use the API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `type:"string"`
@@ -30727,7 +33451,7 @@ type Part struct {
PartNumber *int64 `type:"integer"`
// Size in bytes of the uploaded part data.
- Size *int64 `type:"integer"`
+ Size *int64 `type:"long"`
}
// String returns the string representation.
@@ -30796,6 +33520,44 @@ func (s *Part) SetSize(v int64) *Part {
return s
}
+// Amazon S3 keys for log objects are partitioned in the following format:
+//
+// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+//
+// PartitionedPrefix defaults to EventTime delivery when server access logs
+// are delivered.
+type PartitionedPrefix struct {
+ _ struct{} `locationName:"PartitionedPrefix" type:"structure"`
+
+ // Specifies the partition date source for the partitioned prefix. PartitionDateSource
+ // can be EventTime or DeliveryTime.
+ PartitionDateSource *string `type:"string" enum:"PartitionDateSource"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PartitionedPrefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PartitionedPrefix) GoString() string {
+ return s.String()
+}
+
+// SetPartitionDateSource sets the PartitionDateSource field's value.
+func (s *PartitionedPrefix) SetPartitionDateSource(v string) *PartitionedPrefix {
+ s.PartitionDateSource = &v
+ return s
+}
+
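A sketch of selecting the partitioned key format for server access logs (illustrative only; bucket names are placeholders, and the PartitionDateSource constant is assumed to ship with the same additions):

	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"),
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),
				TargetPrefix: aws.String("access-logs/"),
				TargetObjectKeyFormat: &s3.TargetObjectKeyFormat{
					PartitionedPrefix: &s3.PartitionedPrefix{
						PartitionDateSource: aws.String(s3.PartitionDateSourceEventTime),
					},
				},
			},
		},
	})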
// The container element for a bucket's policy status.
type PolicyStatus struct {
_ struct{} `type:"structure"`
@@ -31043,12 +33805,12 @@ type PutBucketAccelerateConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -31060,9 +33822,9 @@ type PutBucketAccelerateConfigurationInput struct {
// must be populated with the algorithm's checksum of the request payload.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
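A sketch of this input in use, including the optional checksum header discussed above (illustrative only; the bucket name is a placeholder):

	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"),
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmCrc32), // x-amz-sdk-checksum-algorithm
	})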
@@ -31197,12 +33959,12 @@ type PutBucketAclInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -31218,9 +33980,9 @@ type PutBucketAclInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Allows grantee the read, write, read ACP, and write ACP permissions on the
@@ -31411,9 +34173,9 @@ type PutBucketAnalyticsConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID that identifies the analytics configuration.
@@ -31563,12 +34325,12 @@ type PutBucketCorsInput struct {
// CORSConfiguration is a required field
CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -31584,9 +34346,9 @@ type PutBucketCorsInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -31727,12 +34489,12 @@ type PutBucketEncryptionInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -31748,9 +34510,9 @@ type PutBucketEncryptionInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Specifies the default server-side-encryption configuration.
@@ -32028,9 +34790,9 @@ type PutBucketInventoryConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID used to identify the inventory configuration.
@@ -32177,12 +34939,12 @@ type PutBucketLifecycleConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -32198,9 +34960,9 @@ type PutBucketLifecycleConfigurationInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Container for lifecycle rules. You can add as many as 1,000 rules.
@@ -32332,12 +35094,12 @@ type PutBucketLifecycleInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -32353,9 +35115,9 @@ type PutBucketLifecycleInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Container for lifecycle rules. You can add as many as 1000 rules.
@@ -32497,12 +35259,12 @@ type PutBucketLoggingInput struct {
// BucketLoggingStatus is a required field
BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -32518,9 +35280,9 @@ type PutBucketLoggingInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
}
@@ -32654,9 +35416,9 @@ type PutBucketMetricsConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The ID used to identify the metrics configuration. The ID has a 64 character
@@ -32804,9 +35566,9 @@ type PutBucketNotificationConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// A container for specifying the notification configuration of the bucket.
@@ -32950,12 +35712,12 @@ type PutBucketNotificationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -32971,9 +35733,9 @@ type PutBucketNotificationInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The container for the configuration.
@@ -33107,9 +35869,9 @@ type PutBucketOwnershipControlsInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter)
@@ -33240,19 +36002,45 @@ type PutBucketPolicyInput struct {
// The name of the bucket.
//
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name.
+ // Virtual-hosted-style requests aren't supported. Directory bucket names
+ // must be unique in the chosen Availability Zone. Bucket names must also follow
+ // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3).
+ // For information about bucket naming restrictions, see Directory bucket naming
+ // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
- // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
- // parameter.
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
//
// The AWS SDK for Go v1 does not support automatic computing request payload
// checksum. This feature is available in the AWS SDK for Go v2. If a value
@@ -33266,15 +36054,24 @@ type PutBucketPolicyInput struct {
// Set this parameter to true to confirm that you want to remove your permissions
// to change this bucket policy in the future.
+ //
+ // This functionality is not supported for directory buckets.
ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ //
+ // For directory buckets, this header is not supported in this API operation.
+ // If you specify this header, the request fails with the HTTP status code 501
+ // Not Implemented.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The bucket policy as a JSON document.
//
+ // For directory buckets, the only IAM action supported in the bucket policy
+ // is s3express:CreateSession.
+ //
// Policy is a required field
Policy *string `type:"string" required:"true"`
}
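A sketch of attaching the s3express:CreateSession policy mentioned above to a directory bucket (illustrative only; the account ID, AZ ID, and bucket name are placeholders):

	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
	    "Action": "s3express:CreateSession",
	    "Resource": "arn:aws:s3express:us-west-2:111122223333:bucket/example--usw2-az1--x-s3"
	  }]
	}`
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example--usw2-az1--x-s3"),
		Policy: aws.String(policy),
	})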
@@ -33410,12 +36207,12 @@ type PutBucketReplicationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -33431,9 +36228,9 @@ type PutBucketReplicationInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// A container for replication rules. You can add up to 1,000 rules. The maximum
@@ -33582,12 +36379,12 @@ type PutBucketRequestPaymentInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -33603,9 +36400,9 @@ type PutBucketRequestPaymentInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Container for Payer.
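
As context for the Payer container just referenced, a hedged sketch (not part of this diff) of enabling Requester Pays through the same vendored v1 client; the bucket name is a placeholder.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableRequesterPays turns on Requester Pays for a placeholder bucket.
func enableRequesterPays(svc *s3.S3) error {
	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(s3.PayerRequester),
		},
	})
	return err
}
```
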
@@ -33744,12 +36541,12 @@ type PutBucketTaggingInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -33765,9 +36562,9 @@ type PutBucketTaggingInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Container for the TagSet and Tag elements.
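
A corresponding illustrative call (not part of this diff) showing how the TagSet container above is populated; tag keys and values are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tagBucket writes a one-element TagSet; key and value are placeholders.
func tagBucket(svc *s3.S3) error {
	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"),
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("env"), Value: aws.String("prod")},
			},
		},
	})
	return err
}
```
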
@@ -33906,12 +36703,12 @@ type PutBucketVersioningInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -33927,9 +36724,9 @@ type PutBucketVersioningInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The concatenation of the authentication device's serial number, a space,
@@ -34073,12 +36870,12 @@ type PutBucketWebsiteInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -34094,9 +36891,9 @@ type PutBucketWebsiteInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Container for the request.
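
The website-configuration container referenced above would be filled in roughly as follows; a sketch for orientation only (not part of this diff), with placeholder bucket and document names.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// configureWebsite sets index and error documents for static hosting.
func configureWebsite(svc *s3.S3) error {
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}
```
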
@@ -34240,22 +37037,33 @@ type PutObjectAclInput struct {
// The bucket name that contains the object to which you want to attach the
// ACL.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -34271,25 +37079,25 @@ type PutObjectAclInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Allows grantee the read, write, read ACP, and write ACP permissions on the
// bucket.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to list the objects in the bucket.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the bucket ACL.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to create new objects in the bucket.
@@ -34300,37 +37108,28 @@ type PutObjectAclInput struct {
// Allows grantee to write the ACL for the applicable bucket.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Key for which the PUT action was initiated.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this action with an access point through the Amazon Web Services
- // SDKs, you provide the access point ARN in place of the bucket name. For more
- // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
- // in the Amazon S3 User Guide.
- //
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
- // in the Amazon S3 User Guide.
- //
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // VersionId used to reference a specific version of the object.
+ // Version ID used to reference a specific version of the object.
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -34496,6 +37295,8 @@ type PutObjectAclOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
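
To tie PutObjectAclInput and PutObjectAclOutput together, an illustrative v1 call (not part of this diff); the canned-ACL field sits outside the hunks shown here, and the bucket and key names are placeholders.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// grantPublicRead applies a canned ACL and reports whether the requester
// was charged; bucket and key are placeholders.
func grantPublicRead(svc *s3.S3) error {
	out, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
	})
	if err != nil {
		return err
	}
	fmt.Println("request charged:", aws.StringValue(out.RequestCharged))
	return nil
}
```
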
@@ -34527,9 +37328,32 @@ type PutObjectInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object. For more information, see Canned ACL
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
+ // in the Amazon S3 User Guide.
+ //
+ // When adding a new object, you can use headers to grant ACL-based permissions
+ // to individual Amazon Web Services accounts or to predefined groups defined
+ // by Amazon S3. These permissions are then added to the ACL on the object.
+ // By default, all objects are private. Only the owner has full access control.
+ // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If the bucket that you're uploading objects to uses the bucket owner enforced
+ // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
+ // Buckets that use this setting only accept PUT requests that don't specify
+ // an ACL or PUT requests that specify bucket owner full control ACLs, such
+ // as the bucket-owner-full-control canned ACL or an equivalent form of this
+ // ACL expressed in the XML format. PUT requests that contain other ACLs (for
+ // example, custom grants to certain Amazon Web Services accounts) fail and
+ // return a 400 error with the error code AccessControlListNotSupported. For
+ // more information, see Controlling ownership of objects and disabling ACLs
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// Object data.
@@ -34537,19 +37361,33 @@ type PutObjectInput struct {
// The bucket name to which the PUT action was initiated.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -34562,6 +37400,8 @@ type PutObjectInput struct {
//
// Specifying this header with a PUT action doesn’t affect bucket-level settings
// for S3 Bucket Key.
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Can be used to specify caching behavior along the request/reply chain. For
@@ -34569,16 +37409,33 @@ type PutObjectInput struct {
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
- // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
- // parameter.
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
//
// The AWS SDK for Go v1 does not support automatic computing request payload
// checksum. This feature is available in the AWS SDK for Go v2. If a value
@@ -34638,15 +37495,22 @@ type PutObjectInput struct {
// it is optional, we recommend using the Content-MD5 mechanism as an end-to-end
// integrity check. For more information about REST request authentication,
// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+ //
+ // The Content-MD5 header is required for any request to upload an object with
+ // a retention period configured using Amazon S3 Object Lock. For more information
+ // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
// A standard MIME type describing the format of the contents. For more information,
// see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type).
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable. For more information,
@@ -34655,22 +37519,30 @@ type PutObjectInput struct {
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT action was initiated.
@@ -34682,25 +37554,37 @@ type PutObjectInput struct {
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Specifies whether a legal hold will be applied to this object. For more information
- // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// The Object Lock mode that you want to apply to this object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when you want this object's Object Lock to expire. Must
// be formatted as a timestamp parameter.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -34709,6 +37593,8 @@ type PutObjectInput struct {
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by PutObjectInput's
// String and GoString methods.
@@ -34717,13 +37603,18 @@ type PutObjectInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs. This value is stored as
// object metadata and automatically gets passed on to Amazon Web Services KMS
- // for future GetObject or CopyObject operations on this object.
+ // for future GetObject or CopyObject operations on this object. This value
+ // must be explicitly added during CopyObject operations.
+ //
+ // This functionality is not supported for directory buckets.
//
// SSEKMSEncryptionContext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by PutObjectInput's
@@ -34731,39 +37622,62 @@ type PutObjectInput struct {
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
- // this header specifies the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. If you specify
- // x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
+ // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management
+ // Service (KMS) symmetric encryption customer managed key that was used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
// but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3
// uses the Amazon Web Services managed key (aws/s3) to protect the data. If
// the KMS key does not exist in the same account that's issuing the command,
// you must use the full ARN and not just the ID.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by PutObjectInput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
- // S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ // The server-side encryption algorithm used when you store this object in
+ // Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // General purpose buckets - You have four mutually exclusive options to protect
+ // data using server-side encryption in Amazon S3, depending on how you choose
+ // to manage the encryption keys. Specifically, the encryption key options are
+ // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
+ // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
+ // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default.
+ // You can optionally tell Amazon S3 to encrypt data at rest by using server-side
+ // encryption with other key options. For more information, see Using Server-Side
+ // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Directory buckets - For directory buckets, only the server-side encryption
+ // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
- // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
- // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
+ //
+ // * For directory buckets, only the S3 Express One Zone storage class is
+ // supported to store newly created objects.
+ //
+ // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
// (For example, "Key1=Value1")
+ //
+ // This functionality is not supported for directory buckets.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata. For information about object
- // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html)
+ // in the Amazon S3 User Guide.
//
// In the following example, the request header sets the redirect to an object
// (anotherPage.html) in the same bucket:
@@ -34777,7 +37691,10 @@ type PutObjectInput struct {
//
// For more information about website hosting in Amazon S3, see Hosting Websites
// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
- // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
+ // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
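
Drawing several of the PutObjectInput fields above together, a minimal hedged sketch (not part of this diff); all bucket, key, and tag values are placeholder assumptions.

```go
package example

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putEncryptedObject uploads a small payload with SSE-S3, an explicit
// storage class, and a URL-query-encoded tag set; all names are placeholders.
func putEncryptedObject(svc *s3.S3) error {
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("example-key"),
		Body:                 bytes.NewReader([]byte("hello")),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
		StorageClass:         aws.String(s3.StorageClassStandardIa),
		Tagging:              aws.String("env=prod"), // URL Query parameter encoding
	})
	return err
}
```
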
@@ -35090,8 +38007,10 @@ type PutObjectLegalHoldInput struct {
// The bucket name containing the object that you want to place a legal hold
// on.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
@@ -35100,12 +38019,12 @@ type PutObjectLegalHoldInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -35121,9 +38040,9 @@ type PutObjectLegalHoldInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The key name for the object that you want to place a legal hold on.
@@ -35136,10 +38055,14 @@ type PutObjectLegalHoldInput struct {
LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The version ID of the object that you want to place a legal hold on.
@@ -35267,6 +38190,8 @@ type PutObjectLegalHoldOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
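
For orientation (not part of this diff), a sketch of the legal-hold call whose input and output structs appear above; bucket and key are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// holdObject places a legal hold on an object; bucket and key are placeholders.
func holdObject(svc *s3.S3) error {
	_, err := svc.PutObjectLegalHold(&s3.PutObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		LegalHold: &s3.ObjectLockLegalHold{
			Status: aws.String(s3.ObjectLockLegalHoldStatusOn),
		},
	})
	return err
}
```
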
@@ -35302,12 +38227,12 @@ type PutObjectLockConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -35323,19 +38248,23 @@ type PutObjectLockConfigurationInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The Object Lock configuration that you want to apply to the specified bucket.
ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// A token to allow Object Lock to be enabled for an existing bucket.
@@ -35451,6 +38380,8 @@ type PutObjectLockConfigurationOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
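
A hedged sketch of the Object Lock configuration call covered by the two structs above (not part of this diff); the retention mode and period are illustrative choices, and the bucket name is a placeholder.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// setDefaultRetention applies a 30-day GOVERNANCE default to a bucket that
// already has Object Lock enabled; the bucket name is a placeholder.
func setDefaultRetention(svc *s3.S3) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"),
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	return err
}
```
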
@@ -35483,89 +38414,133 @@ type PutObjectOutput struct {
// Indicates whether the uploaded object uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
// Entity tag for the uploaded object.
+ //
+ // General purpose buckets - To ensure that data is not corrupted traversing
+ // the network, for objects where the ETag is the MD5 digest of the object,
+ // you can calculate the MD5 while putting an object to Amazon S3 and compare
+ // the returned ETag to the calculated MD5 value.
+ //
+ // Directory buckets - The ETag for the object in a directory bucket isn't the
+ // MD5 digest of the object.
ETag *string `location:"header" locationName:"ETag" type:"string"`
// If the expiration is configured for the object (see PutBucketLifecycleConfiguration
- // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
- // the response includes this header. It includes the expiry-date and rule-id
- // key-value pairs that provide information about object expiration. The value
- // of the rule-id is URL-encoded.
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html))
+ // in the Amazon S3 User Guide, the response includes this header. It includes
+ // the expiry-date and rule-id key-value pairs that provide information about
+ // object expiration. The value of the rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the Amazon Web Services KMS Encryption Context to use
+ // If present, indicates the Amazon Web Services KMS Encryption Context to use
// for object encryption. The value of this header is a base64-encoded UTF-8
// string holding JSON with the encryption context key-value pairs. This value
// is stored as object metadata and automatically gets passed on to Amazon Web
// Services KMS for future GetObject or CopyObject operations on this object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSEncryptionContext is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by PutObjectOutput's
// String and GoString methods.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
- // this header specifies the ID of the Key Management Service (KMS) symmetric
+ // this header indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by PutObjectOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
- // Version of the object.
+ // Version ID of the object.
+ //
+ // If you enable versioning for a bucket, Amazon S3 automatically generates
+ // a unique version ID for the object being stored. Amazon S3 returns this ID
+ // in the response. When you enable versioning for a bucket, if Amazon S3 receives
+ // multiple write requests for the same object simultaneously, it stores all
+ // of the objects. For more information about versioning, see Adding Objects
+ // to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html)
+ // in the Amazon S3 User Guide. For information about returning the versioning
+ // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html).
+ //
+ // This functionality is not supported for directory buckets.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
}
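
To show how the PutObjectOutput response headers documented above surface in code, an illustrative sketch (not part of this diff); VersionId is empty unless versioning is enabled, and the names are placeholders.

```go
package example

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putAndInspect uploads an object and prints the response fields the
// struct above documents; bucket and key are placeholders.
func putAndInspect(svc *s3.S3) error {
	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
		Body:   bytes.NewReader([]byte("hello")),
	})
	if err != nil {
		return err
	}
	fmt.Println("etag:", aws.StringValue(out.ETag))
	fmt.Println("version id:", aws.StringValue(out.VersionId))
	fmt.Println("sse:", aws.StringValue(out.ServerSideEncryption))
	return nil
}
```
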
@@ -35677,8 +38652,10 @@ type PutObjectRetentionInput struct {
// The bucket name that contains the object you want to apply this Object Retention
// configuration to.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
@@ -35690,12 +38667,12 @@ type PutObjectRetentionInput struct {
// Indicates whether this action should bypass Governance-mode restrictions.
BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -35711,9 +38688,9 @@ type PutObjectRetentionInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The key name for the object that you want to apply this Object Retention
@@ -35723,10 +38700,14 @@ type PutObjectRetentionInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// The container element for the Object Retention configuration.
@@ -35864,6 +38845,8 @@ type PutObjectRetentionOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
}
@@ -35896,30 +38879,33 @@ type PutObjectTaggingInput struct {
// The bucket name containing the object.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -35935,9 +38921,9 @@ type PutObjectTaggingInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Name of the object key.
@@ -35946,10 +38932,14 @@ type PutObjectTaggingInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Container for the TagSet and Tag elements
@@ -36125,12 +39115,12 @@ type PutPublicAccessBlockInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -36146,9 +39136,9 @@ type PutPublicAccessBlockInput struct {
// to be used.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The PublicAccessBlock configuration that you want to apply to this Amazon
@@ -37283,30 +40273,33 @@ type RestoreObjectInput struct {
// The bucket name containing the object to restore.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
@@ -37318,9 +40311,9 @@ type RestoreObjectInput struct {
// must be populated with the algorithm's checksum of the request payload.
ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Object key for which the action was initiated.
@@ -37329,10 +40322,14 @@ type RestoreObjectInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
// Container for restore job parameters.
@@ -37468,6 +40465,8 @@ type RestoreObjectOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// Indicates the path in the provided S3 output location where Select results
@@ -37626,6 +40625,9 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest {
// about these storage classes and how to work with archived objects, see Working
// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
// in the Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets. Only the S3 Express
+// One Zone storage class is supported by directory buckets to store objects.
type RestoreStatus struct {
_ struct{} `type:"structure"`
@@ -38193,9 +41195,9 @@ type SelectObjectContentInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The expression that is used to query the object.
@@ -38572,9 +41574,17 @@ type ServerSideEncryptionByDefault struct {
// Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
// KMS key ID to use for the default encryption. This parameter is allowed if
- // and only if SSEAlgorithm is set to aws:kms.
+ // and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse.
+ //
+ // You can specify the key ID, key alias, or the Amazon Resource Name (ARN)
+ // of the KMS key.
+ //
+ // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+ //
+ // * Key Alias: alias/alias-name
//
- // You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key.
// If you use a key ID, you can run into a LogDestination undeliverable error
// when creating a VPC flow log.
//
@@ -38582,10 +41592,6 @@ type ServerSideEncryptionByDefault struct {
// operations you must use a fully qualified KMS key ARN. For more information,
// see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy).
//
- // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
- //
- // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
- //
// Amazon S3 only supports symmetric encryption KMS keys. For more information,
// see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
// in the Amazon Web Services Key Management Service Developer Guide.
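// Example (illustrative sketch, not part of the generated SDK surface): a
// default-encryption rule using the documentation example key ARN from the
// list above. For cross-account operations, the fully qualified key ARN is
// the safe choice, per the note above.
func exampleSSEKMSDefaultRule() *ServerSideEncryptionRule {
	return &ServerSideEncryptionRule{
		ApplyServerSideEncryptionByDefault: &ServerSideEncryptionByDefault{
			SSEAlgorithm:   aws.String(ServerSideEncryptionAwsKms),
			KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		},
	}
}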
@@ -38766,6 +41772,118 @@ func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryp
return s
}
+// The established temporary security credentials of the session.
+//
+// Directory buckets - These session credentials are only supported for the
+// authentication and authorization of Zonal endpoint APIs on directory buckets.
+type SessionCredentials struct {
+ _ struct{} `type:"structure"`
+
+ // A unique identifier that's associated with a secret access key. The access
+ // key ID and the secret access key are used together to sign programmatic Amazon
+ // Web Services requests cryptographically.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `locationName:"AccessKeyId" type:"string" required:"true"`
+
+ // Temporary security credentials expire after a specified interval. After temporary
+ // credentials expire, any calls that you make with those credentials will fail.
+ // So you must generate a new set of temporary credentials. Temporary credentials
+ // cannot be extended or refreshed beyond the original specified interval.
+ //
+ // Expiration is a required field
+ Expiration *time.Time `locationName:"Expiration" type:"timestamp" required:"true"`
+
+ // A key that's used with the access key ID to cryptographically sign programmatic
+ // Amazon Web Services requests. Signing a request identifies the sender and
+ // prevents the request from being altered.
+ //
+ // SecretAccessKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by SessionCredentials's
+ // String and GoString methods.
+ //
+ // SecretAccessKey is a required field
+ SecretAccessKey *string `locationName:"SecretAccessKey" type:"string" required:"true" sensitive:"true"`
+
+ // A part of the temporary security credentials. The session token is used to
+ // validate the temporary security credentials.
+ //
+ // SessionToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by SessionCredentials's
+ // String and GoString methods.
+ //
+ // SessionToken is a required field
+ SessionToken *string `locationName:"SessionToken" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SessionCredentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SessionCredentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *SessionCredentials) SetAccessKeyId(v string) *SessionCredentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *SessionCredentials) SetExpiration(v time.Time) *SessionCredentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *SessionCredentials) SetSecretAccessKey(v string) *SessionCredentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *SessionCredentials) SetSessionToken(v string) *SessionCredentials {
+ s.SessionToken = &v
+ return s
+}
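// Example (illustrative sketch, not part of the generated SDK surface): the
// String method replaces the sensitive members with "sensitive", so session
// credentials can be logged without leaking secrets. Values are placeholders;
// assumes imports "fmt" and "time".
func ExampleSessionCredentials_redaction() {
	creds := (&SessionCredentials{}).
		SetAccessKeyId("AKIAIOSFODNN7EXAMPLE").
		SetSecretAccessKey("placeholder-secret").
		SetSessionToken("placeholder-token").
		SetExpiration(time.Now().Add(15 * time.Minute))
	// AccessKeyId prints in the clear; SecretAccessKey and SessionToken
	// print as "sensitive".
	fmt.Println(creds)
}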
+
+// To use the simple format for S3 keys for log objects, set SimplePrefix to
+// an empty object.
+//
+// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+type SimplePrefix struct {
+ _ struct{} `locationName:"SimplePrefix" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SimplePrefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SimplePrefix) GoString() string {
+ return s.String()
+}
+
// A container that describes additional filters for identifying the source
// objects that you want to replicate. You can choose to enable or disable the
// replication of these objects. Currently, Amazon S3 supports only the filter
@@ -39302,6 +42420,49 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant {
return s
}
+// Amazon S3 key format for log objects. Only one format, PartitionedPrefix
+// or SimplePrefix, is allowed.
+type TargetObjectKeyFormat struct {
+ _ struct{} `type:"structure"`
+
+ // Partitioned S3 key for log objects.
+ PartitionedPrefix *PartitionedPrefix `locationName:"PartitionedPrefix" type:"structure"`
+
+ // To use the simple format for S3 keys for log objects, set SimplePrefix
+ // to {}.
+ SimplePrefix *SimplePrefix `locationName:"SimplePrefix" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetObjectKeyFormat) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s TargetObjectKeyFormat) GoString() string {
+ return s.String()
+}
+
+// SetPartitionedPrefix sets the PartitionedPrefix field's value.
+func (s *TargetObjectKeyFormat) SetPartitionedPrefix(v *PartitionedPrefix) *TargetObjectKeyFormat {
+ s.PartitionedPrefix = v
+ return s
+}
+
+// SetSimplePrefix sets the SimplePrefix field's value.
+func (s *TargetObjectKeyFormat) SetSimplePrefix(v *SimplePrefix) *TargetObjectKeyFormat {
+ s.SimplePrefix = v
+ return s
+}
+
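// Example (illustrative sketch, not part of the generated SDK surface):
// selecting a log-object key format. Exactly one of the two members may be
// set; SimplePrefix is an empty struct, per the note above.
func exampleLogKeyFormat(partitioned bool) *TargetObjectKeyFormat {
	f := &TargetObjectKeyFormat{}
	if partitioned {
		// Date-partitioned keys; PartitionedPrefix is defined elsewhere in
		// this file.
		return f.SetPartitionedPrefix(&PartitionedPrefix{})
	}
	// Equivalent to setting SimplePrefix to {} in the request.
	return f.SetSimplePrefix(&SimplePrefix{})
}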
// The S3 Intelligent-Tiering storage class is designed to optimize storage
// costs by automatically moving data to the most cost-effective storage access
// tier, without additional operational overhead.
@@ -39586,19 +42747,33 @@ type UploadPartCopyInput struct {
// The bucket name.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -39620,34 +42795,81 @@ type UploadPartCopyInput struct {
// my-access-point owned by account 123456789012 in Region us-west-2, use
// the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
// The value must be URL encoded. Amazon S3 supports copy operations using
- // access points only when the source and destination buckets are in the
- // same Amazon Web Services Region. Alternatively, for objects accessed through
- // Amazon S3 on Outposts, specify the ARN of the object as accessed in the
- // format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // Access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Access points are not supported by directory
+ // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+ // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
// For example, to copy the object reports/january.pdf through outpost my-outpost
// owned by account 123456789012 in Region us-west-2, use the URL encoding
// of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
// The value must be URL-encoded.
//
- // To copy a specific version of an object, append ?versionId=<version-id>
- // to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
- // If you don't specify a version ID, Amazon S3 copies the latest version of
- // the source object.
+ // If your bucket has versioning enabled, you could have multiple versions of
+ // the same object. By default, x-amz-copy-source identifies the current version
+ // of the source object to copy. To copy a specific version of the source object,
+ // append ?versionId=<version-id> to the x-amz-copy-source request header (for
+ // example, x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ //
+ // If the current version is a delete marker and you don't specify a versionId
+ // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
+ // error, because the object does not exist. If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+ // an HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source.
+ //
+ // Directory buckets - S3 Versioning isn't enabled and supported for directory
+ // buckets.
//
// CopySource is a required field
CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
// Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false,
+ //
+ // Amazon S3 returns 200 OK and copies the data.
CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
// Copies the object if it has been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true,
+ //
+ // Amazon S3 returns a 412 Precondition Failed response code.
CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
// Copies the object if its entity tag (ETag) is different than the specified
// ETag.
+ //
+ // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true,
+ //
+ // Amazon S3 returns a 412 Precondition Failed response code.
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
// Copies the object if it hasn't been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false,
+ //
+ // Amazon S3 returns 200 OK and copies the data.
CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
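// Example (illustrative sketch, not part of the generated SDK surface):
// copying a part only while the source object still has a known ETag, with
// the source pinned to a specific version via ?versionId=<version-id> as
// described above. Names and IDs are placeholders; assumes imports "fmt",
// "log", aws, and session.
func ExampleUploadPartCopy_conditional() {
	svc := New(session.Must(session.NewSession()))
	out, err := svc.UploadPartCopy(&UploadPartCopyInput{
		Bucket:            aws.String("example-destination-bucket"),
		Key:               aws.String("large-object"),
		UploadId:          aws.String("example-upload-id"),
		PartNumber:        aws.Int64(1),
		CopySource:        aws.String("awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"),
		CopySourceIfMatch: aws.String(`"9b2cf535f27731c974343645a3985328"`), // expected source ETag
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("copied source version:", aws.StringValue(out.CopySourceVersionId))
}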
// The range of bytes to copy from the source object. The range value must use
@@ -39659,12 +42881,18 @@ type UploadPartCopyInput struct {
// Specifies the algorithm to use when decrypting the source object (for example,
// AES256).
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
// the source object. The encryption key provided in this header must be one
// that was used when the source object was created.
//
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
+ //
// CopySourceSSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UploadPartCopyInput's
// String and GoString methods.
@@ -39673,16 +42901,19 @@ type UploadPartCopyInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
- // The account ID of the expected destination bucket owner. If the destination
- // bucket is owned by a different account, the request fails with the HTTP status
- // code 403 Forbidden (access denied).
+ // The account ID of the expected destination bucket owner. If the account ID
+ // that you provide does not match the actual owner of the destination bucket,
+ // the request fails with the HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
- // The account ID of the expected source bucket owner. If the source bucket
- // is owned by a different account, the request fails with the HTTP status code
- // 403 Forbidden (access denied).
+ // The account ID of the expected source bucket owner. If the account ID that
+ // you provide does not match the actual owner of the source bucket, the request
+ // fails with the HTTP status code 403 Forbidden (access denied).
ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"`
// Object key for which the multipart upload was initiated.
@@ -39697,14 +42928,20 @@ type UploadPartCopyInput struct {
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -39714,6 +42951,9 @@ type UploadPartCopyInput struct {
// header. This must be the same encryption key specified in the initiate multipart
// upload request.
//
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UploadPartCopyInput's
// String and GoString methods.
@@ -39722,6 +42962,9 @@ type UploadPartCopyInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported when the destination bucket is a directory
+ // bucket.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Upload ID identifying the multipart upload whose part is being copied.
@@ -39946,6 +43189,8 @@ type UploadPartCopyOutput struct {
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Container for all response elements.
@@ -39953,32 +43198,46 @@ type UploadPartCopyOutput struct {
// The version of the source object that was copied, if you have enabled versioning
// on the source bucket.
+ //
+ // This functionality is not supported when the source object is in a directory
+ // bucket.
CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UploadPartCopyOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
}
@@ -40056,30 +43315,44 @@ type UploadPartInput struct {
// The name of the bucket to which the multipart upload was initiated.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
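// Example (illustrative sketch, not part of the generated SDK surface):
// setting ChecksumAlgorithm so the SDK computes and sends the corresponding
// x-amz-checksum-crc32 value for the part, instead of supplying a checksum
// by hand. Names and IDs are placeholders; assumes imports "log", "strings",
// aws, and session.
func ExampleUploadPart_checksum() {
	svc := New(session.Must(session.NewSession()))
	_, err := svc.UploadPart(&UploadPartInput{
		Bucket:            aws.String("example-bucket"),
		Key:               aws.String("large-object"),
		UploadId:          aws.String("example-upload-id"),
		PartNumber:        aws.Int64(1),
		Body:              strings.NewReader("part one payload"),
		ChecksumAlgorithm: aws.String(ChecksumAlgorithmCrc32),
	})
	if err != nil {
		log.Fatal(err)
	}
}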
@@ -40129,11 +43402,13 @@ type UploadPartInput struct {
// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
// auto-populated when using the command from the CLI. This parameter is required
// if object lock parameters are specified.
+ //
+ // This functionality is not supported for directory buckets.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// Object key for which the multipart upload was initiated.
@@ -40148,14 +43423,19 @@ type UploadPartInput struct {
PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -40165,6 +43445,8 @@ type UploadPartInput struct {
// header. This must be the same encryption key specified in the initiate multipart
// upload request.
//
+ // This functionality is not supported for directory buckets.
+ //
// SSECustomerKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UploadPartInput's
// String and GoString methods.
@@ -40173,6 +43455,8 @@ type UploadPartInput struct {
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
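// Example (illustrative sketch, not part of the generated SDK surface): how
// the SSECustomerKeyMD5 header value relates to the raw 256-bit key. The SDK
// normally derives this digest for you when SSECustomerKey is set; the
// computation is shown here only to make the integrity check concrete.
// Assumes imports "crypto/md5" and "encoding/base64".
func sseCustomerKeyMD5(rawKey []byte) string {
	// Per the doc comment above, this MD5 is a transmission integrity check
	// on the key (RFC 1321), not a cryptographic protection of it.
	sum := md5.Sum(rawKey)
	return base64.StdEncoding.EncodeToString(sum[:])
}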
// Upload ID identifying the multipart upload whose part is being uploaded.
@@ -40375,37 +43659,47 @@ type UploadPartOutput struct {
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only
- // be present if it was uploaded with the object. With multipart uploads, this
- // may not be a checksum value of the object. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
@@ -40414,28 +43708,39 @@ type UploadPartOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header confirming the encryption algorithm
- // used.
+ // the response will include this header to confirm the encryption algorithm
+ // that's used.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// If server-side encryption with a customer-provided encryption key was requested,
- // the response will include this header to provide round-trip message integrity
+ // the response will include this header to provide the round-trip message integrity
// verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key was used for the object.
+ // If present, indicates the ID of the Key Management Service (KMS) symmetric
+ // encryption customer managed key that was used for the object.
+ //
+ // This functionality is not supported for directory buckets.
//
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by UploadPartOutput's
// String and GoString methods.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
+ // The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256, aws:kms).
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
}
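
Taken together, the notes above mean these response headers largely do not appear for directory buckets. A minimal sketch of reading them back after an UploadPart call, assuming default credential and region resolution; the bucket, key, and upload ID are placeholders:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("example-bucket"),    // placeholder
		Key:        aws.String("example-key"),       // placeholder
		UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part data")),
	})
	if err != nil {
		log.Fatal(err)
	}
	// On directory buckets only SSE-S3 (AES256) is reported; the SSE-C and
	// KMS fields documented above stay nil.
	if out.ServerSideEncryption != nil {
		fmt.Println("part stored with:", aws.StringValue(out.ServerSideEncryption))
	}
}
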
@@ -40819,6 +44124,8 @@ type WriteGetObjectResponseInput struct {
// If present, indicates that the requester was successfully charged for the
// request.
+ //
+ // This functionality is not supported for directory buckets.
RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"`
// Route prefix to the HTTP URL generated.
@@ -40845,9 +44152,9 @@ type WriteGetObjectResponseInput struct {
// server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"`
- // If present, specifies the ID of the Amazon Web Services Key Management Service
- // (Amazon Web Services KMS) symmetric encryption customer managed key that
- // was used for stored in Amazon S3 object.
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon
+ // Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ // customer managed key that was used for the object stored in Amazon S3.
//
// SSEKMSKeyId is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by WriteGetObjectResponseInput's
@@ -41295,6 +44602,9 @@ const (
// BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
BucketLocationConstraintApSouth1 = "ap-south-1"
+ // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintApSouth2 = "ap-south-2"
+
// BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
@@ -41325,6 +44635,9 @@ const (
// BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value
BucketLocationConstraintEuSouth1 = "eu-south-1"
+ // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value
+ BucketLocationConstraintEuSouth2 = "eu-south-2"
+
// BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
BucketLocationConstraintEuWest1 = "eu-west-1"
@@ -41354,12 +44667,6 @@ const (
// BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
BucketLocationConstraintUsWest2 = "us-west-2"
-
- // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value
- BucketLocationConstraintApSouth2 = "ap-south-2"
-
- // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value
- BucketLocationConstraintEuSouth2 = "eu-south-2"
)
// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum
@@ -41371,6 +44678,7 @@ func BucketLocationConstraint_Values() []string {
BucketLocationConstraintApNortheast2,
BucketLocationConstraintApNortheast3,
BucketLocationConstraintApSouth1,
+ BucketLocationConstraintApSouth2,
BucketLocationConstraintApSoutheast1,
BucketLocationConstraintApSoutheast2,
BucketLocationConstraintApSoutheast3,
@@ -41381,6 +44689,7 @@ func BucketLocationConstraint_Values() []string {
BucketLocationConstraintEuCentral1,
BucketLocationConstraintEuNorth1,
BucketLocationConstraintEuSouth1,
+ BucketLocationConstraintEuSouth2,
BucketLocationConstraintEuWest1,
BucketLocationConstraintEuWest2,
BucketLocationConstraintEuWest3,
@@ -41391,8 +44700,6 @@ func BucketLocationConstraint_Values() []string {
BucketLocationConstraintUsGovWest1,
BucketLocationConstraintUsWest1,
BucketLocationConstraintUsWest2,
- BucketLocationConstraintApSouth2,
- BucketLocationConstraintEuSouth2,
}
}
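
The two constants are only moving to their alphabetical slots; their string values are unchanged, so existing callers keep working. A minimal sketch of using one, assuming default credential and region resolution; the bucket name is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			// Reordered constant, same value as before: "ap-south-2".
			LocationConstraint: aws.String(s3.BucketLocationConstraintApSouth2),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
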
@@ -41416,6 +44723,18 @@ func BucketLogsPermission_Values() []string {
}
}
+const (
+ // BucketTypeDirectory is a BucketType enum value
+ BucketTypeDirectory = "Directory"
+)
+
+// BucketType_Values returns all elements of the BucketType enum
+func BucketType_Values() []string {
+ return []string{
+ BucketTypeDirectory,
+ }
+}
+
const (
// BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
BucketVersioningStatusEnabled = "Enabled"
@@ -41488,6 +44807,18 @@ func CompressionType_Values() []string {
}
}
+const (
+ // DataRedundancySingleAvailabilityZone is a DataRedundancy enum value
+ DataRedundancySingleAvailabilityZone = "SingleAvailabilityZone"
+)
+
+// DataRedundancy_Values returns all elements of the DataRedundancy enum
+func DataRedundancy_Values() []string {
+ return []string{
+ DataRedundancySingleAvailabilityZone,
+ }
+}
+
const (
// DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value
DeleteMarkerReplicationStatusEnabled = "Enabled"
@@ -41887,6 +45218,18 @@ func JSONType_Values() []string {
}
}
+const (
+ // LocationTypeAvailabilityZone is a LocationType enum value
+ LocationTypeAvailabilityZone = "AvailabilityZone"
+)
+
+// LocationType_Values returns all elements of the LocationType enum
+func LocationType_Values() []string {
+ return []string{
+ LocationTypeAvailabilityZone,
+ }
+}
+
const (
// MFADeleteEnabled is a MFADelete enum value
MFADeleteEnabled = "Enabled"
@@ -42087,8 +45430,19 @@ func ObjectLockRetentionMode_Values() []string {
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that
-// don't specify an ACL or bucket owner full control ACLs, such as the bucket-owner-full-control
-// canned ACL or an equivalent form of this ACL expressed in the XML format.
+// don't specify an ACL or specify bucket owner full control ACLs (such as the
+// predefined bucket-owner-full-control canned ACL or a custom ACL in XML format
+// that grants the same permissions).
+//
+// By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled.
+// We recommend keeping ACLs disabled, except in uncommon use cases where you
+// must control access for each object individually. For more information about
+// S3 Object Ownership, see Controlling ownership of objects and disabling ACLs
+// for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets. Directory buckets
+// use the bucket owner enforced setting for S3 Object Ownership.
const (
// ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value
ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred"
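
Because BucketOwnerEnforced is now documented as the default, keeping ACLs disabled needs no action. A hedged sketch that pins the recommended default explicitly on an existing bucket; the bucket name is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{
				// Keep the recommended default explicit; ACLs stay disabled.
				{ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
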
@@ -42139,6 +45493,9 @@ const (
// ObjectStorageClassSnow is a ObjectStorageClass enum value
ObjectStorageClassSnow = "SNOW"
+
+ // ObjectStorageClassExpressOnezone is a ObjectStorageClass enum value
+ ObjectStorageClassExpressOnezone = "EXPRESS_ONEZONE"
)
// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum
@@ -42154,6 +45511,7 @@ func ObjectStorageClass_Values() []string {
ObjectStorageClassOutposts,
ObjectStorageClassGlacierIr,
ObjectStorageClassSnow,
+ ObjectStorageClassExpressOnezone,
}
}
@@ -42193,6 +45551,22 @@ func OwnerOverride_Values() []string {
}
}
+const (
+ // PartitionDateSourceEventTime is a PartitionDateSource enum value
+ PartitionDateSourceEventTime = "EventTime"
+
+ // PartitionDateSourceDeliveryTime is a PartitionDateSource enum value
+ PartitionDateSourceDeliveryTime = "DeliveryTime"
+)
+
+// PartitionDateSource_Values returns all elements of the PartitionDateSource enum
+func PartitionDateSource_Values() []string {
+ return []string{
+ PartitionDateSourceEventTime,
+ PartitionDateSourceDeliveryTime,
+ }
+}
+
const (
// PayerRequester is a Payer enum value
PayerRequester = "Requester"
@@ -42313,6 +45687,9 @@ const (
// ReplicationStatusReplica is a ReplicationStatus enum value
ReplicationStatusReplica = "REPLICA"
+
+ // ReplicationStatusCompleted is a ReplicationStatus enum value
+ ReplicationStatusCompleted = "COMPLETED"
)
// ReplicationStatus_Values returns all elements of the ReplicationStatus enum
@@ -42322,6 +45699,7 @@ func ReplicationStatus_Values() []string {
ReplicationStatusPending,
ReplicationStatusFailed,
ReplicationStatusReplica,
+ ReplicationStatusCompleted,
}
}
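
The new COMPLETED status surfaces through the same ReplicationStatus field on HeadObject and GetObject responses as the existing values. A minimal sketch of checking for it, with placeholder names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if aws.StringValue(head.ReplicationStatus) == s3.ReplicationStatusCompleted {
		fmt.Println("object replication finished")
	}
}
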
@@ -42343,6 +45721,8 @@ func ReplicationTimeStatus_Values() []string {
// If present, indicates that the requester was successfully charged for the
// request.
+//
+// This functionality is not supported for directory buckets.
const (
// RequestChargedRequester is a RequestCharged enum value
RequestChargedRequester = "requester"
@@ -42356,10 +45736,14 @@ func RequestCharged_Values() []string {
}
// Confirms that the requester knows that they will be charged for the request.
-// Bucket owners need not specify this parameter in their requests. For information
-// about downloading objects from Requester Pays buckets, see Downloading Objects
+// Bucket owners need not specify this parameter in their requests. If either
+// the source or destination S3 bucket has Requester Pays enabled, the requester
+// will pay for corresponding charges to copy the object. For information about
+// downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+//
+// This functionality is not supported for directory buckets.
const (
// RequestPayerRequester is a RequestPayer enum value
RequestPayerRequester = "requester"
@@ -42404,6 +45788,22 @@ func ServerSideEncryption_Values() []string {
}
}
+const (
+ // SessionModeReadOnly is a SessionMode enum value
+ SessionModeReadOnly = "ReadOnly"
+
+ // SessionModeReadWrite is a SessionMode enum value
+ SessionModeReadWrite = "ReadWrite"
+)
+
+// SessionMode_Values returns all elements of the SessionMode enum
+func SessionMode_Values() []string {
+ return []string{
+ SessionModeReadOnly,
+ SessionModeReadWrite,
+ }
+}
+
const (
// SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value
SseKmsEncryptedObjectsStatusEnabled = "Enabled"
@@ -42450,6 +45850,9 @@ const (
// StorageClassSnow is a StorageClass enum value
StorageClassSnow = "SNOW"
+
+ // StorageClassExpressOnezone is a StorageClass enum value
+ StorageClassExpressOnezone = "EXPRESS_ONEZONE"
)
// StorageClass_Values returns all elements of the StorageClass enum
@@ -42465,6 +45868,7 @@ func StorageClass_Values() []string {
StorageClassOutposts,
StorageClassGlacierIr,
StorageClassSnow,
+ StorageClassExpressOnezone,
}
}
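
EXPRESS_ONEZONE mirrors the ObjectStorageClass addition above and is what directory buckets report; it can also be set explicitly on writes. A hedged sketch with a placeholder directory-bucket name (the base--az-id--x-s3 naming convention is described in the upload_input.go comments later in this diff):

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:       aws.String("example--usw2-az1--x-s3"), // placeholder directory bucket
		Key:          aws.String("example-key"),
		Body:         strings.NewReader("hello"),
		StorageClass: aws.String(s3.StorageClassExpressOnezone),
	})
	if err != nil {
		log.Fatal(err)
	}
}
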
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
index cd6a2e8ae..8a67333ab 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -25,6 +25,15 @@ const (
// "InvalidObjectState".
//
// Object is archived and inaccessible until restored.
+ //
+ // If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+ // storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering
+ // Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,
+ // before you can retrieve the object you must first restore a copy using RestoreObject
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+ // Otherwise, this operation returns an InvalidObjectState error. For information
+ // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+ // in the Amazon S3 User Guide.
ErrCodeInvalidObjectState = "InvalidObjectState"
// ErrCodeNoSuchBucket for service response error code
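
The expanded comment names the recovery path: on InvalidObjectState, call RestoreObject and retry after the restore completes. A minimal sketch of that flow, with placeholder names and an arbitrarily chosen restore tier:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("example-bucket"), aws.String("example-key") // placeholders
	_, err := svc.GetObject(&s3.GetObjectInput{Bucket: bucket, Key: key})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeInvalidObjectState {
		// Kick off a restore; the object becomes readable only after the
		// restore finishes (hours for Deep Archive, faster for Standard tier).
		_, err = svc.RestoreObject(&s3.RestoreObjectInput{
			Bucket: bucket,
			Key:    key,
			RestoreRequest: &s3.RestoreRequest{
				Days:                 aws.Int64(2),
				GlacierJobParameters: &s3.GlacierJobParameters{Tier: aws.String(s3.TierStandard)},
			},
		})
	}
	if err != nil {
		log.Fatal(err)
	}
}
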
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
index 6d679a299..d13b46170 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go
@@ -80,6 +80,10 @@ type S3API interface {
CreateMultipartUploadWithContext(aws.Context, *s3.CreateMultipartUploadInput, ...request.Option) (*s3.CreateMultipartUploadOutput, error)
CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput)
+ CreateSession(*s3.CreateSessionInput) (*s3.CreateSessionOutput, error)
+ CreateSessionWithContext(aws.Context, *s3.CreateSessionInput, ...request.Option) (*s3.CreateSessionOutput, error)
+ CreateSessionRequest(*s3.CreateSessionInput) (*request.Request, *s3.CreateSessionOutput)
+
DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error)
DeleteBucketWithContext(aws.Context, *s3.DeleteBucketInput, ...request.Option) (*s3.DeleteBucketOutput, error)
DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput)
@@ -300,6 +304,13 @@ type S3API interface {
ListBucketsWithContext(aws.Context, *s3.ListBucketsInput, ...request.Option) (*s3.ListBucketsOutput, error)
ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput)
+ ListDirectoryBuckets(*s3.ListDirectoryBucketsInput) (*s3.ListDirectoryBucketsOutput, error)
+ ListDirectoryBucketsWithContext(aws.Context, *s3.ListDirectoryBucketsInput, ...request.Option) (*s3.ListDirectoryBucketsOutput, error)
+ ListDirectoryBucketsRequest(*s3.ListDirectoryBucketsInput) (*request.Request, *s3.ListDirectoryBucketsOutput)
+
+ ListDirectoryBucketsPages(*s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool) error
+ ListDirectoryBucketsPagesWithContext(aws.Context, *s3.ListDirectoryBucketsInput, func(*s3.ListDirectoryBucketsOutput, bool) bool, ...request.Option) error
+
ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsWithContext(aws.Context, *s3.ListMultipartUploadsInput, ...request.Option) (*s3.ListMultipartUploadsOutput, error)
ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput)
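
These interface additions matter mainly for code that mocks s3iface.S3API, which now has five more methods to satisfy. A minimal sketch of the new paginator, written against the interface so a mock slots in the same way:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

func listDirectoryBuckets(api s3iface.S3API) error {
	return api.ListDirectoryBucketsPages(&s3.ListDirectoryBucketsInput{},
		func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool {
			for _, b := range page.Buckets {
				fmt.Println(aws.StringValue(b.Name))
			}
			return true // keep fetching pages
		})
}

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	if err := listDirectoryBuckets(svc); err != nil {
		log.Fatal(err)
	}
}
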
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
index 008633498..f9c6e786d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go
@@ -23,9 +23,32 @@ type UploadInput struct {
_ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"`
// The canned ACL to apply to the object. For more information, see Canned ACL
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+ // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
+ // in the Amazon S3 User Guide.
+ //
+ // When adding a new object, you can use headers to grant ACL-based permissions
+ // to individual Amazon Web Services accounts or to predefined groups defined
+ // by Amazon S3. These permissions are then added to the ACL on the object.
+ // By default, all objects are private. Only the owner has full access control.
+ // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If the bucket that you're uploading objects to uses the bucket owner enforced
+ // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
+ // Buckets that use this setting only accept PUT requests that don't specify
+ // an ACL or PUT requests that specify bucket owner full control ACLs, such
+ // as the bucket-owner-full-control canned ACL or an equivalent form of this
+ // ACL expressed in the XML format. PUT requests that contain other ACLs (for
+ // example, custom grants to certain Amazon Web Services accounts) fail and
+ // return a 400 error with the error code AccessControlListNotSupported. For
+ // more information, see Controlling ownership of objects and disabling ACLs
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
+ //
+ // * This functionality is not supported for directory buckets.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// The readable body payload to send to S3.
@@ -33,19 +56,33 @@ type UploadInput struct {
// The bucket name to which the PUT action was initiated.
//
- // When using this action with an access point, you must direct requests to
- // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide.
//
- // When you use this action with Amazon S3 on Outposts, you must direct requests
- // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
- // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // you use this action with S3 on Outposts through the Amazon Web Services SDKs,
- // you provide the Outposts access point ARN in place of the bucket name. For
- // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
//
// Bucket is a required field
@@ -58,6 +95,8 @@ type UploadInput struct {
//
// Specifying this header with a PUT action doesn’t affect bucket-level settings
// for S3 Bucket Key.
+ //
+ // This functionality is not supported for directory buckets.
BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
// Can be used to specify caching behavior along the request/reply chain. For
@@ -65,16 +104,33 @@ type UploadInput struct {
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9).
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
- // Indicates the algorithm used to create the checksum for the object when using
- // the SDK. This header will not provide any additional functionality if not
- // using the SDK. When sending this header, there must be a corresponding x-amz-checksum
- // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with
- // the HTTP status code 400 Bad Request. For more information, see Checking
- // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+ // S3 fails the request with the HTTP status code 400 Bad Request.
+ //
+ // For the x-amz-checksum-algorithm header, replace algorithm with the supported
+ // algorithm from the following list:
+ //
+ // * CRC32
+ //
+ // * CRC32C
+ //
+ // * SHA1
+ //
+ // * SHA256
+ //
+ // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
//
- // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
- // parameter.
+ // If the individual checksum value you provide through x-amz-checksum-algorithm
+ // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+ // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+ // algorithm that matches the provided value in x-amz-checksum-algorithm.
+ //
+ // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
+ // default checksum algorithm that's used for performance.
//
// The AWS SDK for Go v1 does not support automatic computing request payload
// checksum. This feature is available in the AWS SDK for Go v2. If a value
@@ -130,6 +186,13 @@ type UploadInput struct {
// integrity check. For more information about REST request authentication,
// see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
//
+ // The Content-MD5 header is required for any request to upload an object with
+ // a retention period configured using Amazon S3 Object Lock. For more information
+ // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
// If the ContentMD5 is provided for a multipart upload, it will be ignored.
// For objects that will be uploaded in a single part, the ContentMD5 will be used.
ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
@@ -138,9 +201,9 @@ type UploadInput struct {
// see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type).
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
- // The account ID of the expected bucket owner. If the bucket is owned by a
- // different account, the request fails with the HTTP status code 403 Forbidden
- // (access denied).
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable. For more information,
@@ -149,22 +212,30 @@ type UploadInput struct {
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
//
- // This action is not supported by Amazon S3 on Outposts.
+ // * This functionality is not supported for directory buckets.
+ //
+ // * This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT action was initiated.
@@ -176,25 +247,37 @@ type UploadInput struct {
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Specifies whether a legal hold will be applied to this object. For more information
- // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
// The Object Lock mode that you want to apply to this object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
// The date and time when you want this object's Object Lock to expire. Must
// be formatted as a timestamp parameter.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests. For information
- // about downloading objects from Requester Pays buckets, see Downloading Objects
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- // Specifies the algorithm to use to when encrypting the object (for example,
- // AES256).
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
@@ -202,50 +285,80 @@ type UploadInput struct {
// S3 does not store the encryption key. The key must be appropriate for use
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs. This value is stored as
// object metadata and automatically gets passed on to Amazon Web Services KMS
- // for future GetObject or CopyObject operations on this object.
+ // for future GetObject or CopyObject operations on this object. This value
+ // must be explicitly added during CopyObject operations.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
- // this header specifies the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. If you specify
- // x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
+ // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management
+ // Service (KMS) symmetric encryption customer managed key that was used for
+ // the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
// but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3
// uses the Amazon Web Services managed key (aws/s3) to protect the data. If
// the KMS key does not exist in the same account that's issuing the command,
// you must use the full ARN and not just the ID.
+ //
+ // This functionality is not supported for directory buckets.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
- // The server-side encryption algorithm used when storing this object in Amazon
- // S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ // The server-side encryption algorithm that was used when you store this object
+ // in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+ //
+ // General purpose buckets - You have four mutually exclusive options to protect
+ // data using server-side encryption in Amazon S3, depending on how you choose
+ // to manage the encryption keys. Specifically, the encryption key options are
+ // Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
+ // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
+ // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default.
+ // You can optionally tell Amazon S3 to encrypt data at rest by using server-side
+ // encryption with other key options. For more information, see Using Server-Side
+ // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Directory buckets - For directory buckets, only the server-side encryption
+ // with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
- // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
- // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
+ //
+ // * For directory buckets, only the S3 Express One Zone storage class is
+ // supported to store newly created objects.
+ //
+ // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
// (For example, "Key1=Value1")
+ //
+ // This functionality is not supported for directory buckets.
Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata. For information about object
- // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+ // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html)
+ // in the Amazon S3 User Guide.
//
// In the following example, the request header sets the redirect to an object
// (anotherPage.html) in the same bucket:
@@ -259,6 +372,9 @@ type UploadInput struct {
//
// For more information about website hosting in Amazon S3, see Hosting Websites
// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
- // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
+ // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
}
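
For the upload manager, the practical upshot of all the directory-bucket notes is that only a subset of UploadInput applies there; SSE-S3 is the one encryption option both bucket types accept. A minimal sketch of such an upload, with placeholder names:

package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	uploader := s3manager.NewUploader(session.Must(session.NewSession()))
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),
		Body:   strings.NewReader("payload"),
		// SSE-S3 works for both general purpose and directory buckets,
		// per the field comments above.
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
	})
	if err != nil {
		log.Fatal(err)
	}
}
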
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
index c743913c5..827bd5194 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
@@ -56,9 +56,10 @@ func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Requ
// CreateToken API operation for AWS SSO OIDC.
//
-// Creates and returns an access token for the authorized client. The access
-// token issued will be used to fetch short-term credentials for the assigned
-// roles in the AWS account.
+// Creates and returns access and refresh tokens for clients that are authenticated
+// using client secrets. The access token can be used to fetch short-term credentials
+// for the assigned AWS accounts or to access application APIs using bearer
+// authentication.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -133,6 +134,131 @@ func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInpu
return out, req.Send()
}
+const opCreateTokenWithIAM = "CreateTokenWithIAM"
+
+// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the
+// client's request for the CreateTokenWithIAM operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the CreateTokenWithIAMRequest method.
+// req, resp := client.CreateTokenWithIAMRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
+func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) {
+ op := &request.Operation{
+ Name: opCreateTokenWithIAM,
+ HTTPMethod: "POST",
+ HTTPPath: "/token?aws_iam=t",
+ }
+
+ if input == nil {
+ input = &CreateTokenWithIAMInput{}
+ }
+
+ output = &CreateTokenWithIAMOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateTokenWithIAM API operation for AWS SSO OIDC.
+//
+// Creates and returns access and refresh tokens for clients and applications
+// that are authenticated using IAM entities. The access token can be used to
+// fetch short-term credentials for the assigned Amazon Web Services accounts
+// or to access application APIs using bearer authentication.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS SSO OIDC's
+// API operation CreateTokenWithIAM for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter might be missing or out of range.
+//
+// - InvalidClientException
+// Indicates that the clientId or clientSecret in the request is invalid. For
+// example, this can occur when a client sends an incorrect clientId or an expired
+// clientSecret.
+//
+// - InvalidGrantException
+// Indicates that a request contains an invalid grant. This can occur if a client
+// makes a CreateToken request with an invalid grant type.
+//
+// - UnauthorizedClientException
+// Indicates that the client is not currently authorized to make the request.
+// This can happen when a clientId is not issued for a public client.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
+// - InvalidScopeException
+// Indicates that the scope provided in the request is invalid.
+//
+// - AuthorizationPendingException
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
+//
+// - SlowDownException
+// Indicates that the client is making requests too frequently, more than
+// the service can handle.
+//
+// - AccessDeniedException
+// You do not have sufficient access to perform this action.
+//
+// - ExpiredTokenException
+// Indicates that the token issued by the service is expired and is no longer
+// valid.
+//
+// - InternalServerException
+// Indicates that an error from the service occurred while trying to process
+// a request.
+//
+// - InvalidRequestRegionException
+// Indicates that a token provided as input to the request was issued by and
+// is only usable by calling IAM Identity Center endpoints in another region.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
+func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) {
+ req, out := c.CreateTokenWithIAMRequest(input)
+ return out, req.Send()
+}
+
+// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateTokenWithIAM for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) {
+ req, out := c.CreateTokenWithIAMRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
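
A minimal sketch of driving the new operation for the Refresh Token grant; the application ARN and refresh token are placeholders, and the grant-type string matches the CreateTokenWithIAMInput docs later in this file:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	// Unlike CreateToken, this operation is authenticated with the
	// caller's IAM credentials, which the default session resolves.
	oidc := ssooidc.New(session.Must(session.NewSession()))
	out, err := oidc.CreateTokenWithIAM(&ssooidc.CreateTokenWithIAMInput{
		ClientId:     aws.String("arn:aws:sso::123456789012:application/example"), // placeholder
		GrantType:    aws.String("refresh_token"),
		RefreshToken: aws.String("example-refresh-token"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token expires in", aws.Int64Value(out.ExpiresIn), "seconds")
}
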
const opRegisterClient = "RegisterClient"
// RegisterClientRequest generates a "aws/request.Request" representing the
@@ -205,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques
// Indicates that an error from the service occurred while trying to process
// a request.
//
+// - InvalidRedirectUriException
+// Indicates that one or more redirect URI in the request is not supported for
+// this operation.
+//
+// - UnsupportedGrantTypeException
+// Indicates that the grant type in the request is not supported by the service.
+//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) {
req, out := c.RegisterClientRequest(input)
@@ -331,8 +464,11 @@ type AccessDeniedException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception, the value will be access_denied.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -400,8 +536,11 @@ type AuthorizationPendingException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception, the value will be authorization_pending.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -466,8 +605,8 @@ func (s *AuthorizationPendingException) RequestID() string {
type CreateTokenInput struct {
_ struct{} `type:"structure"`
- // The unique identifier string for each client. This value should come from
- // the persisted result of the RegisterClient API.
+ // The unique identifier string for the client or application. This value comes
+ // from the result of the RegisterClient API.
//
// ClientId is a required field
ClientId *string `locationName:"clientId" type:"string" required:"true"`
@@ -475,23 +614,39 @@ type CreateTokenInput struct {
// A secret string generated for the client. This value should come from the
// persisted result of the RegisterClient API.
//
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ //
// ClientSecret is a required field
- ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"`
+ ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
- // The authorization code received from the authorization service. This parameter
- // is required to perform an authorization grant request to get access to a
- // token.
+ // Used only when calling this API for the Authorization Code grant type. The
+ // short-term code is used to identify this authorization request. This grant
+ // type is currently unsupported for the CreateToken API.
Code *string `locationName:"code" type:"string"`
- // Used only when calling this API for the device code grant type. This short-term
- // code is used to identify this authentication attempt. This should come from
- // an in-memory reference to the result of the StartDeviceAuthorization API.
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Device Code grant type. This short-term
+ // code is used to identify this authorization request. This comes from the
+ // result of the StartDeviceAuthorization API.
DeviceCode *string `locationName:"deviceCode" type:"string"`
- // Supports grant types for the authorization code, refresh token, and device
- // code request. For device code requests, specify the following value:
+ // Supports the following OAuth grant types: Device Code and Refresh Token.
+ // Specify either of the following values, depending on the grant type that
+ // you want:
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
//
- // urn:ietf:params:oauth:grant-type:device_code
+ // * Refresh Token - refresh_token
//
// For information about how to obtain the device code, see the StartDeviceAuthorization
// topic.
@@ -499,21 +654,28 @@ type CreateTokenInput struct {
// GrantType is a required field
GrantType *string `locationName:"grantType" type:"string" required:"true"`
- // The location of the application that will receive the authorization code.
- // Users authorize the service to send the request to this location.
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered
+ // to receive the authorization code.
RedirectUri *string `locationName:"redirectUri" type:"string"`
- // Currently, refreshToken is not yet implemented and is not supported. For
- // more information about the features and limitations of the current IAM Identity
- // Center OIDC implementation, see Considerations for Using this Guide in the
- // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ // Used only when calling this API for the Refresh Token grant type. This token
+ // is used to refresh short-term tokens, such as the access token, that might
+ // expire.
//
- // The token used to obtain an access token in the event that the access token
- // is invalid or expired.
- RefreshToken *string `locationName:"refreshToken" type:"string"`
-
- // The list of scopes that is defined by the client. Upon authorization, this
- // list is used to restrict permissions when granting an access token.
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenInput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // The list of scopes for which authorization is requested. The access token
+ // that is issued is limited to the scopes that are granted. If this value is
+ // not specified, IAM Identity Center authorizes all scopes that are configured
+ // for the client during the call to RegisterClient.
Scope []*string `locationName:"scope" type:"list"`
}
@@ -572,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput {
return s
}
+// SetCodeVerifier sets the CodeVerifier field's value.
+func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput {
+ s.CodeVerifier = &v
+ return s
+}
+
// SetDeviceCode sets the DeviceCode field's value.
func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput {
s.DeviceCode = &v
@@ -605,31 +773,44 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput {
type CreateTokenOutput struct {
_ struct{} `type:"structure"`
- // An opaque token to access IAM Identity Center resources assigned to a user.
- AccessToken *string `locationName:"accessToken" type:"string"`
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ //
+ // AccessToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenOutput's
+ // String and GoString methods.
+ AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
// Indicates the time in seconds when an access token will expire.
ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
- // Currently, idToken is not yet implemented and is not supported. For more
- // information about the features and limitations of the current IAM Identity
- // Center OIDC implementation, see Considerations for Using this Guide in the
- // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ // The idToken is not implemented or supported. For more information about the
+ // features and limitations of the current IAM Identity Center OIDC implementation,
+ // see Considerations for Using this Guide in the IAM Identity Center OIDC API
+ // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
//
- // The identifier of the user that associated with the access token, if present.
- IdToken *string `locationName:"idToken" type:"string"`
-
- // Currently, refreshToken is not yet implemented and is not supported. For
- // more information about the features and limitations of the current IAM Identity
- // Center OIDC implementation, see Considerations for Using this Guide in the
- // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ // A JSON Web Token (JWT) that identifies who is associated with the issued
+ // access token.
//
+ // IdToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenOutput's
+ // String and GoString methods.
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
+
// A token that, if present, can be used to refresh a previously issued access
// token that might have expired.
- RefreshToken *string `locationName:"refreshToken" type:"string"`
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenOutput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
// Used to notify the client that the returned token is an access token. The
- // supported type is BearerToken.
+ // supported token type is Bearer.
TokenType *string `locationName:"tokenType" type:"string"`
}
@@ -681,14 +862,328 @@ func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput {
return s
}
+type CreateTokenWithIAMInput struct {
+ _ struct{} `type:"structure"`
+
+ // Used only when calling this API for the JWT Bearer grant type. This value
+ // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize
+ // a trusted token issuer, configure the JWT Bearer GrantOptions for the application.
+ //
+ // Assertion is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ Assertion *string `locationName:"assertion" type:"string" sensitive:"true"`
+
+ // The unique identifier string for the client or application. This value is
+ // an application ARN that has OAuth grants configured.
+ //
+ // ClientId is a required field
+ ClientId *string `locationName:"clientId" type:"string" required:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // short-term code is used to identify this authorization request. The code
+ // is obtained through a redirect from IAM Identity Center to a redirect URI
+ // persisted in the Authorization Code GrantOptions for the application.
+ Code *string `locationName:"code" type:"string"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ //
+ // CodeVerifier is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
+
+ // Supports the following OAuth grant types: Authorization Code, Refresh Token,
+ // JWT Bearer, and Token Exchange. Specify one of the following values, depending
+ // on the grant type that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
+ //
+ // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
+ //
+ // GrantType is a required field
+ GrantType *string `locationName:"grantType" type:"string" required:"true"`
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered
+ // to receive the authorization code.
+ RedirectUri *string `locationName:"redirectUri" type:"string"`
+
+ // Used only when calling this API for the Refresh Token grant type. This token
+ // is used to refresh short-term tokens, such as the access token, that might
+ // expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that the requester can receive. The following
+ // values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ RequestedTokenType *string `locationName:"requestedTokenType" type:"string"`
+
+ // The list of scopes for which authorization is requested. The access token
+ // that is issued is limited to the scopes that are granted. If the value is
+ // not specified, IAM Identity Center authorizes all scopes configured for the
+ // application, including the following default scopes: openid, aws, sts:identity_context.
+ Scope []*string `locationName:"scope" type:"list"`
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the subject of the exchange. The value of the subject token must
+ // be an access token issued by IAM Identity Center to a different client or
+ // application. The access token must have authorized scopes that indicate the
+ // requested application as a target audience.
+ //
+ // SubjectToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
+ // String and GoString methods.
+ SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"`
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that is passed as the subject of the exchange.
+ // The following value is supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ SubjectTokenType *string `locationName:"subjectTokenType" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTokenWithIAMInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"}
+ if s.ClientId == nil {
+ invalidParams.Add(request.NewErrParamRequired("ClientId"))
+ }
+ if s.GrantType == nil {
+ invalidParams.Add(request.NewErrParamRequired("GrantType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAssertion sets the Assertion field's value.
+func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput {
+ s.Assertion = &v
+ return s
+}
+
+// SetClientId sets the ClientId field's value.
+func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput {
+ s.ClientId = &v
+ return s
+}
+
+// SetCode sets the Code field's value.
+func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput {
+ s.Code = &v
+ return s
+}
+
+// SetCodeVerifier sets the CodeVerifier field's value.
+func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput {
+ s.CodeVerifier = &v
+ return s
+}
+
+// SetGrantType sets the GrantType field's value.
+func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput {
+ s.GrantType = &v
+ return s
+}
+
+// SetRedirectUri sets the RedirectUri field's value.
+func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput {
+ s.RedirectUri = &v
+ return s
+}
+
+// SetRefreshToken sets the RefreshToken field's value.
+func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput {
+ s.RefreshToken = &v
+ return s
+}
+
+// SetRequestedTokenType sets the RequestedTokenType field's value.
+func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput {
+ s.RequestedTokenType = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput {
+ s.Scope = v
+ return s
+}
+
+// SetSubjectToken sets the SubjectToken field's value.
+func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput {
+ s.SubjectToken = &v
+ return s
+}
+
+// SetSubjectTokenType sets the SubjectTokenType field's value.
+func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput {
+ s.SubjectTokenType = &v
+ return s
+}
+
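For context, here is a minimal sketch of how the fluent setters and Validate shown above combine at a call site. It assumes the JWT Bearer grant; the application ARN and assertion value are placeholders, not values from this diff.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	// Chain the generated setters. Validate enforces only ClientId and
	// GrantType; the remaining members are grant-type specific (Assertion
	// belongs to the JWT Bearer grant).
	input := (&ssooidc.CreateTokenWithIAMInput{}).
		SetClientId("arn:aws:sso::123456789012:application/placeholder"). // hypothetical ARN
		SetGrantType("urn:ietf:params:oauth:grant-type:jwt-bearer").
		SetAssertion("eyJhbGciOi...") // placeholder JWT assertion

	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	// Assertion is tagged sensitive, so String() redacts it here.
	fmt.Println(input)
}
```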
+type CreateTokenWithIAMOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ //
+ // AccessToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
+ // String and GoString methods.
+ AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
+
+ // Indicates the number of seconds until the access token expires.
+ ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
+
+ // A JSON Web Token (JWT) that identifies the user associated with the issued
+ // access token.
+ //
+ // IdToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
+ // String and GoString methods.
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
+
+ // Indicates the type of tokens that are issued by IAM Identity Center. The
+ // following values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ IssuedTokenType *string `locationName:"issuedTokenType" type:"string"`
+
+ // A token that, if present, can be used to refresh a previously issued access
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide
+ // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
+ //
+ // RefreshToken is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
+ // String and GoString methods.
+ RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
+
+ // The list of scopes for which authorization is granted. The access token that
+ // is issued is limited to the scopes that are granted.
+ Scope []*string `locationName:"scope" type:"list"`
+
+ // Used to notify the requester that the returned token is an access token.
+ // The supported token type is Bearer.
+ TokenType *string `locationName:"tokenType" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateTokenWithIAMOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccessToken sets the AccessToken field's value.
+func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput {
+ s.AccessToken = &v
+ return s
+}
+
+// SetExpiresIn sets the ExpiresIn field's value.
+func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput {
+ s.ExpiresIn = &v
+ return s
+}
+
+// SetIdToken sets the IdToken field's value.
+func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput {
+ s.IdToken = &v
+ return s
+}
+
+// SetIssuedTokenType sets the IssuedTokenType field's value.
+func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput {
+ s.IssuedTokenType = &v
+ return s
+}
+
+// SetRefreshToken sets the RefreshToken field's value.
+func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput {
+ s.RefreshToken = &v
+ return s
+}
+
+// SetScope sets the Scope field's value.
+func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput {
+ s.Scope = v
+ return s
+}
+
+// SetTokenType sets the TokenType field's value.
+func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput {
+ s.TokenType = &v
+ return s
+}
+
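On the response side, the output members pair with the same client. The sketch below calls CreateTokenWithIAM, which is defined earlier in this file, and reads the non-sensitive members; the session setup, region, and token values are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	// Assumes default credentials; region and request values are placeholders.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ssooidc.New(sess)

	out, err := svc.CreateTokenWithIAM(&ssooidc.CreateTokenWithIAMInput{
		ClientId:     aws.String("arn:aws:sso::123456789012:application/placeholder"),
		GrantType:    aws.String("refresh_token"),
		RefreshToken: aws.String("placeholder-refresh-token"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// AccessToken, IdToken, and RefreshToken are tagged sensitive, so
	// out.String() redacts them; dereference the pointers to use them.
	fmt.Println("expires in:", aws.Int64Value(out.ExpiresIn), "seconds")
	fmt.Println("token type:", aws.StringValue(out.TokenType))
}
```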
// Indicates that the token issued by the service is expired and is no longer
// valid.
type ExpiredTokenException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be expired_token.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -756,8 +1251,11 @@ type InternalServerException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be server_error.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -826,8 +1324,11 @@ type InvalidClientException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be invalid_client.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -895,8 +1396,11 @@ type InvalidClientMetadataException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be invalid_client_metadata.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -964,8 +1468,11 @@ type InvalidGrantException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be invalid_grant.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -1027,14 +1534,89 @@ func (s *InvalidGrantException) RequestID() string {
return s.RespMetadata.RequestID
}
+// Indicates that one or more redirect URIs in the request are not supported
+// for this operation.
+type InvalidRedirectUriException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Single error code. For this exception the value will be invalid_redirect_uri.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRedirectUriException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error {
+ return &InvalidRedirectUriException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRedirectUriException) Code() string {
+ return "InvalidRedirectUriException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRedirectUriException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRedirectUriException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRedirectUriException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRedirectUriException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
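The new exception has the same shape as the existing ones, so it drops into the usual error-handling pattern. A hedged sketch, assuming err came back from one of the client's operations:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// handleOIDCError branches on the generated error types, either through the
// awserr.Error code or a direct assertion on the exported concrete type.
func handleOIDCError(err error) {
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ssooidc.ErrCodeInvalidRedirectUriException {
		fmt.Println("redirect URI rejected:", aerr.Message())
	}
	if e, ok := err.(*ssooidc.InvalidRedirectUriException); ok {
		// Error_ carries the OAuth-style code, invalid_redirect_uri here.
		fmt.Println("oauth error code:", aws.StringValue(e.Error_))
	}
}

func main() {
	handleOIDCError(nil) // in practice, pass an error returned by the client
}
```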
// Indicates that something is wrong with the input to the request. For example,
// a required parameter might be missing or out of range.
type InvalidRequestException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be invalid_request.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -1096,13 +1678,95 @@ func (s *InvalidRequestException) RequestID() string {
return s.RespMetadata.RequestID
}
+// Indicates that a token provided as input to the request was issued by, and
+// can only be used when calling, IAM Identity Center endpoints in another region.
+type InvalidRequestRegionException struct {
+ _ struct{} `type:"structure"`
+ RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+ // Indicates the IAM Identity Center endpoint that the requester may call with
+ // this token.
+ Endpoint *string `locationName:"endpoint" type:"string"`
+
+ // Single error code. For this exception the value will be invalid_request.
+ Error_ *string `locationName:"error" type:"string"`
+
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
+ Error_description *string `locationName:"error_description" type:"string"`
+
+ Message_ *string `locationName:"message" type:"string"`
+
+ // Indicates the region that the requester may call with this token.
+ Region *string `locationName:"region" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestRegionException) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s InvalidRequestRegionException) GoString() string {
+ return s.String()
+}
+
+func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error {
+ return &InvalidRequestRegionException{
+ RespMetadata: v,
+ }
+}
+
+// Code returns the exception type name.
+func (s *InvalidRequestRegionException) Code() string {
+ return "InvalidRequestRegionException"
+}
+
+// Message returns the exception's message.
+func (s *InvalidRequestRegionException) Message() string {
+ if s.Message_ != nil {
+ return *s.Message_
+ }
+ return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *InvalidRequestRegionException) OrigErr() error {
+ return nil
+}
+
+func (s *InvalidRequestRegionException) Error() string {
+ return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *InvalidRequestRegionException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *InvalidRequestRegionException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
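InvalidRequestRegionException is unusual among these exceptions in carrying routing hints. A sketch of one plausible recovery path, assuming the caller is willing to rebuild the client in the indicated region and retry:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

// regionForToken reports the region hint carried by an
// InvalidRequestRegionException, if err contains one.
func regionForToken(err error) (string, bool) {
	var e *ssooidc.InvalidRequestRegionException
	if errors.As(err, &e) {
		return aws.StringValue(e.Region), true
	}
	return "", false
}

func main() {
	if region, ok := regionForToken(nil); ok { // in practice, pass the operation error
		fmt.Println("retry against region:", region)
	}
}
```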
// Indicates that the scope provided in the request is invalid.
type InvalidScopeException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be invalid_scope.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -1178,6 +1842,25 @@ type RegisterClientInput struct {
// ClientType is a required field
ClientType *string `locationName:"clientType" type:"string" required:"true"`
+ // This IAM Identity Center application ARN is used to define administrator-managed
+ // configuration for public client access to resources. At authorization, the
+ // scopes, grants, and redirect URIs available to this client will be restricted
+ // by this application resource.
+ EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"`
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list
+ // is used to restrict the token granting flows available to the client.
+ GrantTypes []*string `locationName:"grantTypes" type:"list"`
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string `locationName:"issuerUrl" type:"string"`
+
+ // The list of redirect URIs that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent
+ // can be redirected back to.
+ RedirectUris []*string `locationName:"redirectUris" type:"list"`
+
// The list of scopes that are defined by the client. Upon authorization, this
// list is used to restrict permissions when granting an access token.
Scopes []*string `locationName:"scopes" type:"list"`
@@ -1229,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput {
return s
}
+// SetEntitledApplicationArn sets the EntitledApplicationArn field's value.
+func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput {
+ s.EntitledApplicationArn = &v
+ return s
+}
+
+// SetGrantTypes sets the GrantTypes field's value.
+func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput {
+ s.GrantTypes = v
+ return s
+}
+
+// SetIssuerUrl sets the IssuerUrl field's value.
+func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput {
+ s.IssuerUrl = &v
+ return s
+}
+
+// SetRedirectUris sets the RedirectUris field's value.
+func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput {
+ s.RedirectUris = v
+ return s
+}
+
// SetScopes sets the Scopes field's value.
func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput {
s.Scopes = v
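The expanded registration input combines as follows. A hedged sketch: the issuer URL, grant types, and redirect URI below are placeholder values, and the session setup is an assumption.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ssooidc.New(sess)

	// Pin the client to an issuer, grant types, and redirect URIs up front.
	out, err := svc.RegisterClient((&ssooidc.RegisterClientInput{}).
		SetClientName("example-client").
		SetClientType("public").
		SetIssuerUrl("https://identitycenter.amazonaws.com/ssoins-0000000000000000"). // placeholder
		SetGrantTypes(aws.StringSlice([]string{"authorization_code", "refresh_token"})).
		SetRedirectUris(aws.StringSlice([]string{"http://127.0.0.1:8080/callback"})))
	if err != nil {
		log.Fatal(err)
	}
	// ClientSecret is now tagged sensitive, so out.String() redacts it.
	fmt.Println("client id:", aws.StringValue(out.ClientId))
}
```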
@@ -1238,7 +1945,7 @@ func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput {
type RegisterClientOutput struct {
_ struct{} `type:"structure"`
- // The endpoint where the client can request authorization.
+ // An endpoint that the client can use to request authorization.
AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"`
// The unique identifier string for each client. This client uses this identifier
@@ -1250,12 +1957,16 @@ type RegisterClientOutput struct {
// A secret string generated for the client. The client will use this string
// to get authenticated by the service in subsequent calls.
- ClientSecret *string `locationName:"clientSecret" type:"string"`
+ //
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by RegisterClientOutput's
+ // String and GoString methods.
+ ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"`
// Indicates the time at which the clientId and clientSecret will become invalid.
ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"`
- // The endpoint where the client can get an access token.
+ // An endpoint that the client can use to create tokens.
TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"`
}
@@ -1319,8 +2030,11 @@ type SlowDownException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be slow_down.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -1395,11 +2109,15 @@ type StartDeviceAuthorizationInput struct {
// A secret string that is generated for the client. This value should come
// from the persisted result of the RegisterClient API operation.
//
+ // ClientSecret is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's
+ // String and GoString methods.
+ //
// ClientSecret is a required field
- ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"`
+ ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
- // The URL for the AWS access portal. For more information, see Using the AWS
- // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
+ // The URL for the Amazon Web Services access portal. For more information,
+ // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
// in the IAM Identity Center User Guide.
//
// StartUrl is a required field
@@ -1550,8 +2268,11 @@ type UnauthorizedClientException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be unauthorized_client.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
@@ -1618,8 +2339,11 @@ type UnsupportedGrantTypeException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+ // Single error code. For this exception the value will be unsupported_grant_type.
Error_ *string `locationName:"error" type:"string"`
+ // Human-readable text providing additional information, used to assist the
+ // client developer in understanding the error that occurred.
Error_description *string `locationName:"error_description" type:"string"`
Message_ *string `locationName:"message" type:"string"`
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
index 8b5ee6019..083568c61 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
@@ -3,15 +3,13 @@
// Package ssooidc provides the client and types for making API
// requests to AWS SSO OIDC.
//
-// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect
-// (OIDC) is a web service that enables a client (such as AWS CLI or a native
-// application) to register with IAM Identity Center. The service also enables
-// the client to fetch the user’s access token upon successful authentication
-// and authorization with IAM Identity Center.
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access
+// token upon successful authentication and authorization with IAM Identity
+// Center.
//
-// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
-// will continue to retain their original name for backward compatibility purposes.
-// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
+// IAM Identity Center uses the sso and identitystore API namespaces.
//
// # Considerations for Using This Guide
//
@@ -22,21 +20,24 @@
// - The IAM Identity Center OIDC service currently implements only the portions
// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628
// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single
-// sign-on authentication with the AWS CLI. Support for other OIDC flows
-// frequently needed for native applications, such as Authorization Code
-// Flow (+ PKCE), will be addressed in future releases.
+// sign-on authentication with the CLI.
//
-// - The service emits only OIDC access tokens, such that obtaining a new
-// token (For example, token refresh) requires explicit user re-authentication.
+// - With older versions of the CLI, the service only emits OIDC access tokens,
+// so to obtain a new token, users must explicitly re-authenticate. To access
+// the OIDC flow that supports token refresh and doesn’t require re-authentication,
+// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
+// V2) with support for OIDC token refresh and configurable IAM Identity
+// Center session durations. For more information, see Configure Amazon Web
+// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
//
-// - The access tokens provided by this service grant access to all AWS account
-// entitlements assigned to an IAM Identity Center user, not just a particular
-// application.
+// - The access tokens provided by this service grant access to all Amazon
+// Web Services account entitlements assigned to an IAM Identity Center user,
+// not just a particular application.
//
// - The documentation in this guide does not describe the mechanism to convert
-// the access token into AWS Auth (“sigv4”) credentials for use with
-// IAM-protected AWS service endpoints. For more information, see GetRoleCredentials
-// (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
+// the access token into Amazon Web Services Auth (“sigv4”) credentials
+// for use with IAM-protected Amazon Web Services service endpoints. For
+// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
// in the IAM Identity Center Portal API Reference Guide.
//
// For general information about IAM Identity Center, see What is IAM Identity
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
index 698377012..cadf4584d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
@@ -57,6 +57,13 @@ const (
// makes a CreateToken request with an invalid grant type.
ErrCodeInvalidGrantException = "InvalidGrantException"
+ // ErrCodeInvalidRedirectUriException for service response error code
+ // "InvalidRedirectUriException".
+ //
+ // Indicates that one or more redirect URIs in the request are not supported
+ // for this operation.
+ ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException"
+
// ErrCodeInvalidRequestException for service response error code
// "InvalidRequestException".
//
@@ -64,6 +71,13 @@ const (
// a required parameter might be missing or out of range.
ErrCodeInvalidRequestException = "InvalidRequestException"
+ // ErrCodeInvalidRequestRegionException for service response error code
+ // "InvalidRequestRegionException".
+ //
+ // Indicates that a token provided as input to the request was issued by, and
+ // can only be used when calling, IAM Identity Center endpoints in another region.
+ ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"
+
// ErrCodeInvalidScopeException for service response error code
// "InvalidScopeException".
//
@@ -99,7 +113,9 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"InvalidClientException": newErrorInvalidClientException,
"InvalidClientMetadataException": newErrorInvalidClientMetadataException,
"InvalidGrantException": newErrorInvalidGrantException,
+ "InvalidRedirectUriException": newErrorInvalidRedirectUriException,
"InvalidRequestException": newErrorInvalidRequestException,
+ "InvalidRequestRegionException": newErrorInvalidRequestRegionException,
"InvalidScopeException": newErrorInvalidScopeException,
"SlowDownException": newErrorSlowDownException,
"UnauthorizedClientException": newErrorUnauthorizedClientException,
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
index 969f33c37..782bae369 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
@@ -51,7 +51,7 @@ const (
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {
- c.SigningName = "awsssooidc"
+ c.SigningName = "sso-oauth"
}
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index 11af63b4d..2c395f5f6 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -1460,7 +1460,15 @@ type AssumeRoleInput struct {
// in the IAM User Guide.
PolicyArns []*PolicyDescriptorType `type:"list"`
- // Reserved for future use.
+ // A list of previously acquired trusted context assertions in the format of
+ // a JSON array. The trusted context assertion is signed and encrypted by Amazon
+ // Web Services STS.
+ //
+ // The following is an example of a ProvidedContext value that includes a single
+ // trusted context assertion and the ARN of the context provider from which
+ // the trusted context assertion was generated.
+ //
+ // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
ProvidedContexts []*ProvidedContext `type:"list"`
// The Amazon Resource Name (ARN) of the role to assume.
@@ -3405,14 +3413,18 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
return s
}
-// Reserved for future use.
+// Contains information about the provided context. This includes the signed
+// and encrypted trusted context assertion and the context provider ARN from
+// which the trusted context assertion was generated.
type ProvidedContext struct {
_ struct{} `type:"structure"`
- // Reserved for future use.
+ // The signed and encrypted trusted context assertion generated by the context
+ // provider. The trusted context assertion is signed and encrypted by Amazon
+ // Web Services STS.
ContextAssertion *string `min:"4" type:"string"`
- // Reserved for future use.
+ // The context provider ARN from which the trusted context assertion was generated.
ProviderArn *string `min:"20" type:"string"`
}
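With the documentation filled in, the previously reserved STS members have an obvious call shape. A minimal sketch, assuming a trusted context assertion was obtained out of band; the role ARN is a placeholder and the provider ARN mirrors the example above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("identity-context-session"),
		ProvidedContexts: []*sts.ProvidedContext{{
			ProviderArn:      aws.String("arn:aws:iam::aws:contextProvider/IdentityCenter"),
			ContextAssertion: aws.String("trusted-context-assertion"), // placeholder
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed role:", aws.StringValue(out.AssumedRoleUser.Arn))
}
```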
diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore
new file mode 100644
index 000000000..2518b3491
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/.gitignore
@@ -0,0 +1,29 @@
+# Eclipse
+.classpath
+.project
+.settings/
+
+# Intellij
+.idea/
+*.iml
+*.iws
+
+# Mac
+.DS_Store
+
+# Maven
+target/
+**/dependency-reduced-pom.xml
+
+# Gradle
+/.gradle
+build/
+*/out/
+*/*/out/
+
+# VS Code
+bin/
+.vscode/
+
+# make
+c.out
diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml
new file mode 100644
index 000000000..f8d1035cc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/.travis.yml
@@ -0,0 +1,28 @@
+language: go
+sudo: true
+dist: bionic
+
+branches:
+ only:
+ - main
+
+os:
+ - linux
+ - osx
+ # Travis doesn't work with windows and Go tip
+ #- windows
+
+go:
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+before_install:
+ - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi
+ - (cd /tmp/; go get golang.org/x/lint/golint)
+
+script:
+ - make go test -v ./...;
+
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
new file mode 100644
index 000000000..8193f4b39
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -0,0 +1,352 @@
+# Release (2025-11-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.23.2
+ * **Bug Fix**: Adjust the initial sizes of each middleware phase to avoid some unnecessary reallocation.
+ * **Bug Fix**: Avoid unnecessary allocation overhead from the metrics system when not in use.
+
+# Release (2025-10-15)
+
+## General Highlights
+* **Dependency Update**: Bump minimum go version to 1.23.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# Release (2025-09-18)
+
+## Module Highlights
+* `github.com/aws/smithy-go/aws-http-auth`: [v1.1.0](aws-http-auth/CHANGELOG.md#v110-2025-09-18)
+ * **Feature**: Added support for SIG4/SIGV4A querystring authentication.
+
+# Release (2025-08-27)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.23.0
+ * **Feature**: Sort map keys in JSON Document types.
+
+# Release (2025-07-24)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.5
+ * **Feature**: Add HTTP interceptors.
+
+# Release (2025-06-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.4
+ * **Bug Fix**: Fix CBOR serde empty check for string and enum fields
+ * **Bug Fix**: Fix HTTP metrics data race.
+ * **Bug Fix**: Replace usages of deprecated ioutil package.
+
+# Release (2025-02-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.3
+ * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy.
+
+# Release (2025-01-21)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.2
+ * **Bug Fix**: Fix HTTP metrics data race.
+ * **Bug Fix**: Replace usages of deprecated ioutil package.
+
+# Release (2024-11-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.1
+ * **Bug Fix**: Fix failure to replace URI path segments when their names overlap.
+
+# Release (2024-10-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.0
+ * **Feature**: Add HTTP client metrics.
+
+# Release (2024-09-25)
+
+## Module Highlights
+* `github.com/aws/smithy-go/aws-http-auth`: [v1.0.0](aws-http-auth/CHANGELOG.md#v100-2024-09-25)
+ * **Release**: Initial release of module aws-http-auth, which implements generically consumable SigV4 and SigV4a request signing.
+
+# Release (2024-09-19)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.21.0
+ * **Feature**: Add tracing and metrics APIs, and builtin instrumentation for both, in generated clients.
+* `github.com/aws/smithy-go/metrics/smithyotelmetrics`: [v1.0.0](metrics/smithyotelmetrics/CHANGELOG.md#v100-2024-09-19)
+ * **Release**: Initial release of `smithyotelmetrics` module, which is used to adapt an OpenTelemetry SDK meter provider to be used with Smithy clients.
+* `github.com/aws/smithy-go/tracing/smithyoteltracing`: [v1.0.0](tracing/smithyoteltracing/CHANGELOG.md#v100-2024-09-19)
+ * **Release**: Initial release of `smithyoteltracing` module, which is used to adapt an OpenTelemetry SDK tracer provider to be used with Smithy clients.
+
+# Release (2024-08-14)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.4
+ * **Dependency Update**: Bump minimum Go version to 1.21.
+
+# Release (2024-06-27)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.3
+ * **Bug Fix**: Fix encoding/cbor test overflow on x86.
+
+# Release (2024-03-29)
+
+* No change notes available for this release.
+
+# Release (2024-02-21)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.1
+ * **Bug Fix**: Remove runtime dependency on go-cmp.
+
+# Release (2024-02-13)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.0
+ * **Feature**: Add codegen definition for sigv4a trait.
+ * **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# Release (2023-12-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.19.0
+ * **Feature**: Support modeled request compression.
+
+# Release (2023-11-30)
+
+* No change notes available for this release.
+
+# Release (2023-11-29)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.18.0
+ * **Feature**: Expose Options() method on generated service clients.
+
+# Release (2023-11-15)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.17.0
+ * **Feature**: Support identity/auth components of client reference architecture.
+
+# Release (2023-10-31)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.16.0
+ * **Feature**: **LANG**: Bump minimum go version to 1.19.
+
+# Release (2023-10-06)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.15.0
+ * **Feature**: Add `http.WithHeaderComment` middleware.
+
+# Release (2023-08-18)
+
+* No change notes available for this release.
+
+# Release (2023-08-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.14.1
+ * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation.
+
+# Release (2023-07-31)
+
+## General Highlights
+* **Feature**: Adds support for smithy-modeled endpoint resolution.
+
+# Release (2022-12-02)
+
+* No change notes available for this release.
+
+# Release (2022-10-24)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.4
+ * **Bug Fix**: fixed document type checking for encoding nested types
+
+# Release (2022-09-14)
+
+* No change notes available for this release.
+
+# Release (v1.13.2)
+
+* No change notes available for this release.
+
+# Release (v1.13.1)
+
+* No change notes available for this release.
+
+# Release (v1.13.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.0
+ * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation.
+
+# Release (v1.12.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.12.1
+ * **Bug Fix**: Fixes a bug where JSON object keys were not escaped.
+
+# Release (v1.12.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.12.0
+ * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value.
+
+# Release (v1.11.3)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.3
+ * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8.
+
+# Release (v1.11.2)
+
+* No change notes available for this release.
+
+# Release (v1.11.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.1
+ * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583)
+
+# Release (v1.11.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.0
+ * **Feature**: Updates deserialization of header list to support quoted strings
+
+# Release (v1.10.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.10.0
+ * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type.
+
+# Release (v1.9.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.9.1
+ * **Documentation**: Fixes various typos in Go package documentation.
+
+# Release (v1.9.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.9.0
+ * **Feature**: sync: OnceErr can be used to concurrently record a signal when an error has occurred.
+ * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing.
+
+# Release v1.8.1
+
+### Smithy Go Module
+* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set.
+ * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418)
+
+# Release v1.8.0
+
+### Smithy Go Module
+
+* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324))
+ * Adds support for parsing DateTime timestamps formatted similarly to RFC 3339, but without the `Z` character or UTC offset.
+ * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387)
+
+# Release v1.7.0
+
+### Smithy Go Module
+* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314))
+ * Handle error for defer close call
+* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318))
+ * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata.
+* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310))
+
+### Codegen
+* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310))
+ * Adds support for Smithy Document shapes and supporting types for protocols to implement support
+
+# Release v1.6.0 (2021-07-15)
+
+### Smithy Go Module
+* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316))
+
+### Codegen
+* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316))
+* Adds support for protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315))
+
+# Release v1.5.0 (2021-06-25)
+
+### Smithy Go module
+* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307))
+ * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost.
+
+### Codegen
+* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301))
+* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296))
+* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283))
+* Update Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298))
+* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304))
+* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303))
+
+# Release v1.4.0 (2021-05-06)
+
+### Smithy Go module
+* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267))
+
+### Codegen
+* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289))
+* Add support for httpQueryParams location
+* Add support for model renaming conflict resolution with service closure
+
+# Release v1.3.1 (2021-04-08)
+
+### Smithy Go module
+* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279))
+* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282))
+
+# Release v1.3.0 (2021-04-01)
+
+### Smithy Go module
+* `transport/http`: Add utility to safely join string to url path, and url raw query.
+
+### Codegen
+* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility.
+
+# Release v1.2.0 (2021-03-12)
+
+### Smithy Go module
+* Fix support for parsing shortened year format in HTTP Date header.
+* Fix GitHub APIDiff action workflow to get gorelease tool correctly.
+* Fix codegen artifact unit test for Go 1.16
+
+### Codegen
+* Fix generating paginator nil parameter handling before usage.
+* Fix Serialize unboxed members decorated as required.
+* Add ability to define resolvers at both client construction and operation invocation.
+* Support for extending paginators with custom runtime trait
diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..5b627cfa6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
new file mode 100644
index 000000000..1f8d01ff6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
@@ -0,0 +1,90 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+### Changelog Documents
+
+(You can SKIP this step if you are only changing the code generator, and not the runtime).
+
+When submitting a pull request please include a changelog file on a folder named `.changelog`.
+These are used to generate the content `CHANGELOG.md` and Release Notes. The format of the file is as follows:
+
+```
+{
+ "id": "12345678-1234-1234-1234-123456789012"
+ "type": "bugfix"
+ "collapse": true
+ "description": "Fix improper use of printf-style functions.",
+ "modules": [
+ "."
+ ]
+}
+```
+
+* id: a UUID. This should also be used for the name of the file, so if your id is `12345678-1234-1234-1234-123456789012` the file should be named `12345678-1234-1234-1234-123456789012.json`
+* type: one of the following:
+ * bugfix: Fixing an existing bug
+ * feature: Adding a new feature to an existing service
+ * release: Releasing a new module
+ * dependency: Updating dependencies
+ * announcement: Making an announcement, like deprecation of a module
+* collapse: whether this change should appear as a separate entry for every module listed in `modules` (`"collapse": false`), or as a single entry (`"collapse": true`)
+ * For the smithy-go repository this should always be `false`
+* description: a description of this change. Most of the time this is the same as the title of the PR
+* modules: the Go modules this change impacts. The root module is expressed as "."
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE
new file mode 100644
index 000000000..67db85882
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile
new file mode 100644
index 000000000..a12b124d5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/Makefile
@@ -0,0 +1,130 @@
+PRE_RELEASE_VERSION ?=
+
+RELEASE_MANIFEST_FILE ?=
+RELEASE_CHGLOG_DESC_FILE ?=
+
+REPOTOOLS_VERSION ?= latest
+REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools
+REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?=
+REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION}
+REPOTOOLS_CMD_EACHMODULE = ${REPOTOOLS_MODULE}/cmd/eachmodule@${REPOTOOLS_VERSION}
+
+UNIT_TEST_TAGS=
+BUILD_TAGS=
+
+ifneq ($(PRE_RELEASE_VERSION),)
+ REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
+endif
+
+smithy-publish-local:
+ cd codegen && ./gradlew publishToMavenLocal
+
+smithy-build:
+ cd codegen && ./gradlew build
+
+smithy-clean:
+ cd codegen && ./gradlew clean
+
+GRADLE_RETRIES := 3
+GRADLE_SLEEP := 2
+
+# We're making a call to ./gradlew to trigger downloading Gradle and
+# starting the daemon. Any call works, so using `./gradlew help`
+ensure-gradle-up:
+ @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \
+ echo "Checking if Gradle daemon is up, attempt $$i..."; \
+ if ./gradlew help; then \
+ echo "Gradle daemon is up!"; \
+ exit 0; \
+ fi; \
+ echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \
+ sleep $(GRADLE_SLEEP); \
+ done; \
+ echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \
+ exit 1
+
+##################
+# Linting/Verify #
+##################
+.PHONY: verify vet cover
+
+verify: vet
+
+vet: vet-modules-.
+
+vet-modules-%:
+ go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst vet-modules-,,$@) \
+ "go vet ${BUILD_TAGS} --all ./..."
+
+cover:
+ go test ${BUILD_TAGS} -coverprofile c.out ./...
+ @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \
+ echo "total (statements): $$cover%";
+
+################
+# Unit Testing #
+################
+.PHONY: test unit unit-race
+
+test: unit-race
+
+unit: verify unit-modules-.
+
+unit-modules-%:
+ go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst unit-modules-,,$@) \
+ "go test -timeout=1m ${UNIT_TEST_TAGS} ./..."
+
+unit-race: verify unit-race-modules-.
+
+unit-race-modules-%:
+ go run ${REPOTOOLS_CMD_EACHMODULE} -p $(subst unit-race-modules-,,$@) \
+ "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
+
+
+###################
+# Release Process #
+###################
+.PHONY: preview-release pre-release-validation release
+
+preview-release:
+ go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS}
+
+pre-release-validation:
+ @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \
+ echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \
+ fi
+ @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \
+ echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \
+ fi
+
+release: pre-release-validation
+ go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS}
+ go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE}
+ go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE}
+ go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE}
+ go run ${REPOTOOLS_CMD_CHANGELOG} rm -all
+ go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE}
+
+module-version:
+ @go run ${REPOTOOLS_CMD_MODULE_VERSION} .
+
+##############
+# Repo Tools #
+##############
+.PHONY: external-changelog install-changelog
+
+external-changelog:
+ mkdir -p .changelog
+ cp changelog-template.json .changelog/00000000-0000-0000-0000-000000000000.json
+ @echo "Generate a new UUID and update the file at .changelog/00000000-0000-0000-0000-000000000000.json"
+ @echo "Make sure to rename the file with your new id, like .changelog/12345678-1234-1234-1234-123456789012.json"
+ @echo "See CONTRIBUTING.md 'Changelog Documents' and an example at https://github.com/aws/smithy-go/pull/543/files"
+
+install-changelog:
+ go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE
new file mode 100644
index 000000000..616fc5889
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
new file mode 100644
index 000000000..ddce37b99
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -0,0 +1,100 @@
+# Smithy Go
+
+[Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml) [Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
+
+[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime.
+
+The smithy-go runtime requires a minimum version of Go 1.23.
+
+**WARNING: All interfaces are subject to change.**
+
+## :no_entry_sign: DO NOT use the code generators in this repository
+
+**The code generators in this repository do not generate working clients at
+this time.**
+
+To generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java),
+such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html),
+which drives generation of the transport mechanisms and serialization/deserialization
+("serde") code.
+
+The code generator does not currently support any protocols out of the box.
+Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html)
+exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are
+tracking the movement of those out of the SDK into smithy-go in
+[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no
+timeline for doing so.
+
+## Plugins
+
+This repository implements the following Smithy build plugins:
+
+| ID | GAV prefix | Description |
+|----|------------|-------------|
+| `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. |
+| `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. |
+| `go-shape-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go shape code generation (types only) for Smithy models. |
+
+**NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.**
+
+## `go-codegen`
+
+### Configuration
+
+[`GoSettings`](codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/GoSettings.java)
+contains all of the settings enabled from `smithy-build.json` and helper
+methods and types. The up-to-date list of top-level properties enabled for
+`go-client-codegen` can be found in `GoSettings::from()`.
+
+| Setting | Type | Required | Description |
+|-----------------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------|
+| `service` | string | yes | The Shape ID of the service for which to generate the client. |
+| `module` | string | yes | Name of the module in `generated.json` (and `go.mod` if `generateGoMod` is enabled) and `doc.go`. |
+| `generateGoMod` | boolean | | Whether to generate a default `go.mod` file. The default value is `false`. |
+| `goDirective` | string | | [Go directive](https://go.dev/ref/mod#go-mod-file-go) of the module. The default value is the minimum supported Go version. |
+
+### Supported protocols
+
+| Protocol | Notes |
+|----------|-------|
+| [`smithy.protocols#rpcv2Cbor`](https://smithy.io/2.0/additional-specs/protocols/smithy-rpc-v2.html) | Event streaming not yet implemented. |
+
+### Example
+
+This example applies the `go-codegen` build plugin to the Smithy quickstart
+example created from `smithy init`:
+
+```json
+{
+ "version": "1.0",
+ "sources": [
+ "models"
+ ],
+ "maven": {
+ "dependencies": [
+ "software.amazon.smithy.go:smithy-go-codegen:0.1.0"
+ ]
+ },
+ "plugins": {
+ "go-codegen": {
+ "service": "example.weather#Weather",
+ "module": "github.com/example/weather",
+ "generateGoMod": true,
+ "goDirective": "1.23"
+ }
+ }
+}
+```
+
+## `go-server-codegen`
+
+This plugin is a work-in-progress and is currently undocumented.
+
+## `go-shape-codegen`
+
+This plugin is a work-in-progress and is currently undocumented.
+
+## License
+
+This project is licensed under the Apache-2.0 License.
+
diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go
new file mode 100644
index 000000000..5bdb70c9a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/auth.go
@@ -0,0 +1,3 @@
+// Package auth defines protocol-agnostic authentication types for smithy
+// clients.
+package auth
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
new file mode 100644
index 000000000..1c9b9715c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
@@ -0,0 +1,3 @@
+// Package bearer provides middleware and utilities for authenticating API
+// operation calls with a Bearer Token.
+package bearer
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
new file mode 100644
index 000000000..8c7d72099
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
@@ -0,0 +1,104 @@
+package bearer
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Message is the middleware stack's request transport message value.
+type Message interface{}
+
+// Signer provides an interface for implementations to decorate a request
+// message with a bearer token. The signer is responsible for validating that
+// the message type is compatible with the signer.
+type Signer interface {
+ SignWithBearerToken(context.Context, Token, Message) (Message, error)
+}
+
+// AuthenticationMiddleware provides the Finalize middleware step for signing
+// a request message with a bearer token.
+type AuthenticationMiddleware struct {
+ signer Signer
+ tokenProvider TokenProvider
+}
+
+// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the
+// middleware Stack in the Finalize step with the options provided.
+func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error {
+ return s.Finalize.Add(
+ NewAuthenticationMiddleware(signer, tokenProvider),
+ middleware.After,
+ )
+}
+
+// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware.
+func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware {
+ return &AuthenticationMiddleware{
+ signer: signer,
+ tokenProvider: tokenProvider,
+ }
+}
+
+const authenticationMiddlewareID = "BearerTokenAuthentication"
+
+// ID returns the resolver identifier
+func (m *AuthenticationMiddleware) ID() string {
+ return authenticationMiddlewareID
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface in order to
+// update the request with bearer token authentication.
+func (m *AuthenticationMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ token, err := m.tokenProvider.RetrieveBearerToken(ctx)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err)
+ }
+
+ signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err)
+ }
+
+ in.Request = signedMessage
+ return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPSMessage provides a bearer token authentication implementation that
+// will sign the message with the provided bearer token.
+//
+// Will fail if the message is not a smithy-go HTTP request or the request is
+// not HTTPS.
+type SignHTTPSMessage struct{}
+
+// NewSignHTTPSMessage returns an initialized signer for HTTP messages.
+func NewSignHTTPSMessage() *SignHTTPSMessage {
+ return &SignHTTPSMessage{}
+}
+
+// SignWithBearerToken returns a copy of the HTTP request with the bearer token
+// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750.
+//
+// Returns an error if the request's URL scheme is not HTTPS, or the request
+// message is not a smithy-go HTTP Request pointer type.
+func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) {
+ req, ok := message.(*smithyhttp.Request)
+ if !ok {
+ return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message)
+ }
+
+ if !req.IsHTTPS() {
+ return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS")
+ }
+
+ reqClone := req.Clone()
+ reqClone.Header.Set("Authorization", "Bearer "+token.Value)
+
+ return reqClone, nil
+}
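+
+// Example (an illustrative sketch, not part of the upstream source): wiring
+// bearer-token authentication into a middleware stack. Only exported
+// identifiers from this package, the middleware package, and the
+// transport/http package are used; the token value is a placeholder.
+//
+//	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)
+//	provider := bearer.StaticTokenProvider{Token: bearer.Token{Value: "my-token"}}
+//	err := bearer.AddAuthenticationMiddleware(stack, bearer.NewSignHTTPSMessage(), provider)
+//	if err != nil {
+//		// handle error
+//	}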
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
new file mode 100644
index 000000000..be260d4c7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+ "context"
+ "time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+ Value string
+
+ CanExpire bool
+ Expires time.Time
+}
+
+// Expired returns whether the token's Expires time is before or equal to the
+// time provided. If CanExpire is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+ if !t.CanExpire {
+ return false
+ }
+ now = now.Round(0)
+ return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides an interface for retrieving bearer tokens.
+type TokenProvider interface {
+ RetrieveBearerToken(context.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(context.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) {
+ return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+ Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) {
+ return s.Token, nil
+}
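+
+// Example (an illustrative sketch, not part of the upstream source): adapting
+// an ad hoc function to the TokenProvider interface via TokenProviderFunc.
+//
+//	provider := bearer.TokenProviderFunc(func(ctx context.Context) (bearer.Token, error) {
+//		return bearer.Token{
+//			Value:     "my-token",
+//			CanExpire: true,
+//			Expires:   time.Now().Add(15 * time.Minute),
+//		}, nil
+//	})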
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
new file mode 100644
index 000000000..223ddf52b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
@@ -0,0 +1,208 @@
+package bearer
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ smithycontext "github.com/aws/smithy-go/context"
+ "github.com/aws/smithy-go/internal/sync/singleflight"
+)
+
+// timeNow is a package variable that can be overridden in unit tests.
+var timeNow = time.Now
+
+// TokenCacheOptions provides a set of optional configuration options for the
+// TokenCache TokenProvider.
+type TokenCacheOptions struct {
+	// The duration before the token expires at which the credentials will be
+ // refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls
+ // will be blocking.
+ //
+ // Asynchronous refreshes are deduplicated, and only one will be in-flight
+ // at a time. If the token expires while an asynchronous refresh is in
+ // flight, the next call to RetrieveBearerToken will block on that refresh
+ // to return.
+ RefreshBeforeExpires time.Duration
+
+ // The timeout the underlying TokenProvider's RetrieveBearerToken call must
+ // return within, or will be canceled. Defaults to 0, no timeout.
+ //
+	// With a 0 timeout, it's possible for the underlying tokenProvider's
+	// RetrieveBearerToken call to block forever, preventing subsequent
+	// TokenCache attempts to refresh the token.
+ //
+ // If this timeout is reached all pending deduplicated calls to
+ // TokenCache RetrieveBearerToken will fail with an error.
+ RetrieveBearerTokenTimeout time.Duration
+
+	// The minimum duration between asynchronous refresh attempts. If the most
+	// recent asynchronous refresh attempt was within the minimum delay
+ // duration, the call to retrieve will return the current cached token, if
+ // not expired.
+ //
+ // The asynchronous retrieve is deduplicated across multiple calls when
+ // RetrieveBearerToken is called. The asynchronous retrieve is not a
+ // periodic task. It is only performed when the token has not yet expired,
+ // and the current item is within the RefreshBeforeExpires window, and the
+ // TokenCache's RetrieveBearerToken method is called.
+ //
+ // If 0, (default) there will be no minimum delay between asynchronous
+ // refresh attempts.
+ //
+ // If DisableAsyncRefresh is true, this option is ignored.
+ AsyncRefreshMinimumDelay time.Duration
+
+ // Sets if the TokenCache will attempt to refresh the token in the
+ // background asynchronously instead of blocking for credentials to be
+ // refreshed. If disabled token refresh will be blocking.
+ //
+ // The first call to RetrieveBearerToken will always be blocking, because
+ // there is no cached token.
+ DisableAsyncRefresh bool
+}
+
+// TokenCache provides a utility to cache Bearer Authentication tokens from a
+// wrapped TokenProvider. The TokenCache has options to configure the cache's
+// early and asynchronous refresh of the token.
+type TokenCache struct {
+ options TokenCacheOptions
+ provider TokenProvider
+
+ cachedToken atomic.Value
+ lastRefreshAttemptTime atomic.Value
+ sfGroup singleflight.Group
+}
+
+// NewTokenCache returns an initialized TokenCache that implements the
+// TokenProvider interface, wrapping the provider passed in. It also takes a
+// set of optional functional option parameters to configure the token cache.
+func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache {
+ var options TokenCacheOptions
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &TokenCache{
+ options: options,
+ provider: provider,
+ }
+}
+
+// RetrieveBearerToken returns the token if it could be obtained, or error if a
+// valid token could not be retrieved.
+//
+// The passed in Context's cancel/deadline/timeout will impact only this
+// individual retrieve call and not any other already queued up calls. This
+// means the underlying provider's RetrieveBearerToken calls could block
+// forever, and not be canceled with the Context. Set RetrieveBearerTokenTimeout
+// to provide a timeout, preventing the underlying TokenProvider from blocking
+// forever.
+//
+// By default, if the passed in Context is canceled, all of its values will be
+// considered expired. The wrapped TokenProvider will not be able to lookup the
+// values from the Context once it is expired. This is done to protect against
+// expired values no longer being valid. To disable this behavior, use
+// smithy-go's context.WithPreserveExpiredValues to add a value to the Context
+// before calling RetrieveBearerToken to enable support for expired values.
+//
+// Without RetrieveBearerTokenTimeout there is the potential for an underlying
+// provider's RetrieveBearerToken call to sit forever, blocking subsequent
+// attempts at refreshing the token.
+func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) {
+ cachedToken, ok := p.getCachedToken()
+ if !ok || cachedToken.Expired(timeNow()) {
+ return p.refreshBearerToken(ctx)
+ }
+
+ // Check if the token should be refreshed before it expires.
+ refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires))
+ if !refreshToken {
+ return cachedToken, nil
+ }
+
+ if p.options.DisableAsyncRefresh {
+ return p.refreshBearerToken(ctx)
+ }
+
+ p.tryAsyncRefresh(ctx)
+
+ return cachedToken, nil
+}
+
+// tryAsyncRefresh attempts to asynchronously refresh the token, returning the
+// already cached token. If the AsyncRefreshMinimumDelay option is not zero, and
+// the duration since the last refresh is less than that value, nothing will be
+// done.
+func (p *TokenCache) tryAsyncRefresh(ctx context.Context) {
+ if p.options.AsyncRefreshMinimumDelay != 0 {
+ var lastRefreshAttempt time.Time
+ if v := p.lastRefreshAttemptTime.Load(); v != nil {
+ lastRefreshAttempt = v.(time.Time)
+ }
+
+ if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) {
+ return
+ }
+ }
+
+ // Ignore the returned channel so this won't be blocking, and limit the
+ // number of additional goroutines created.
+ p.sfGroup.DoChan("async-refresh", func() (interface{}, error) {
+ res, err := p.refreshBearerToken(ctx)
+ if p.options.AsyncRefreshMinimumDelay != 0 {
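+			// The attempt time is recorded only when the refresh fails; a
+			// success stores the zero time, clearing the minimum-delay window
+			// so the next asynchronous refresh is not deferred.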
+ var refreshAttempt time.Time
+ if err != nil {
+ refreshAttempt = timeNow()
+ }
+ p.lastRefreshAttemptTime.Store(refreshAttempt)
+ }
+
+ return res, err
+ })
+}
+
+func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) {
+ resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) {
+ ctx := smithycontext.WithSuppressCancel(ctx)
+ if v := p.options.RetrieveBearerTokenTimeout; v != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, v)
+ defer cancel()
+ }
+ return p.singleRetrieve(ctx)
+ })
+
+ select {
+ case res := <-resCh:
+ return res.Val.(Token), res.Err
+ case <-ctx.Done():
+ return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err())
+ }
+}
+
+func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+ token, err := p.provider.RetrieveBearerToken(ctx)
+ if err != nil {
+ return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err)
+ }
+
+ p.cachedToken.Store(&token)
+ return token, nil
+}
+
+// getCachedToken returns the currently cached token and true if found. Returns
+// false if no token is cached.
+func (p *TokenCache) getCachedToken() (Token, bool) {
+ v := p.cachedToken.Load()
+ if v == nil {
+ return Token{}, false
+ }
+
+ t := v.(*Token)
+ if t == nil || t.Value == "" {
+ return Token{}, false
+ }
+
+ return *t, true
+}
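+
+// Example (an illustrative sketch, not part of the upstream source): wrapping
+// a provider so tokens are refreshed five minutes before expiry; the wrapped
+// provider and ctx are assumed to exist.
+//
+//	cache := bearer.NewTokenCache(provider, func(o *bearer.TokenCacheOptions) {
+//		o.RefreshBeforeExpires = 5 * time.Minute
+//		o.RetrieveBearerTokenTimeout = 30 * time.Second
+//	})
+//	token, err := cache.RetrieveBearerToken(ctx)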
diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go
new file mode 100644
index 000000000..ba8cf70d4
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/identity.go
@@ -0,0 +1,47 @@
+package auth
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/smithy-go"
+)
+
+// Identity contains information that identifies who the user making the
+// request is.
+type Identity interface {
+ Expiration() time.Time
+}
+
+// IdentityResolver defines the interface through which an Identity is
+// retrieved.
+type IdentityResolver interface {
+ GetIdentity(context.Context, smithy.Properties) (Identity, error)
+}
+
+// IdentityResolverOptions defines the interface through which an entity can be
+// queried to retrieve an IdentityResolver for a given auth scheme.
+type IdentityResolverOptions interface {
+ GetIdentityResolver(schemeID string) IdentityResolver
+}
+
+// AnonymousIdentity is a sentinel to indicate no identity.
+type AnonymousIdentity struct{}
+
+var _ Identity = (*AnonymousIdentity)(nil)
+
+// Expiration returns the zero value for time, as anonymous identity never
+// expires.
+func (*AnonymousIdentity) Expiration() time.Time {
+ return time.Time{}
+}
+
+// AnonymousIdentityResolver returns AnonymousIdentity.
+type AnonymousIdentityResolver struct{}
+
+var _ IdentityResolver = (*AnonymousIdentityResolver)(nil)
+
+// GetIdentity returns AnonymousIdentity.
+func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) {
+ return &AnonymousIdentity{}, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go
new file mode 100644
index 000000000..d5dabff04
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/option.go
@@ -0,0 +1,25 @@
+package auth
+
+import "github.com/aws/smithy-go"
+
+type (
+ authOptionsKey struct{}
+)
+
+// Option represents a possible authentication method for an operation.
+type Option struct {
+ SchemeID string
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+// GetAuthOptions gets auth Options from Properties.
+func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) {
+ v, ok := p.Get(authOptionsKey{}).([]*Option)
+ return v, ok
+}
+
+// SetAuthOptions sets auth Options on Properties.
+func SetAuthOptions(p *smithy.Properties, options []*Option) {
+ p.Set(authOptionsKey{}, options)
+}
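+
+// Example (an illustrative sketch, not part of the upstream source):
+// advertising a resolved set of auth options through Properties.
+//
+//	var props smithy.Properties
+//	auth.SetAuthOptions(&props, []*auth.Option{{SchemeID: auth.SchemeIDHTTPBearer}})
+//	options, ok := auth.GetAuthOptions(&props) // options[0].SchemeID == auth.SchemeIDHTTPBearer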
diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go
new file mode 100644
index 000000000..fb6a57c64
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/scheme_id.go
@@ -0,0 +1,20 @@
+package auth
+
+// Anonymous
+const (
+ SchemeIDAnonymous = "smithy.api#noAuth"
+)
+
+// HTTP auth schemes
+const (
+ SchemeIDHTTPBasic = "smithy.api#httpBasicAuth"
+ SchemeIDHTTPDigest = "smithy.api#httpDigestAuth"
+ SchemeIDHTTPBearer = "smithy.api#httpBearerAuth"
+ SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth"
+)
+
+// AWS auth schemes
+const (
+ SchemeIDSigV4 = "aws.auth#sigv4"
+ SchemeIDSigV4A = "aws.auth#sigv4a"
+)
diff --git a/vendor/github.com/aws/smithy-go/changelog-template.json b/vendor/github.com/aws/smithy-go/changelog-template.json
new file mode 100644
index 000000000..d36e2b3e1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/changelog-template.json
@@ -0,0 +1,9 @@
+{
+ "id": "00000000-0000-0000-0000-000000000000",
+ "type": "feature|bugfix|dependency",
+ "description": "Description of your changes",
+ "collapse": false,
+ "modules": [
+ "."
+ ]
+}
diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go
new file mode 100644
index 000000000..69af87751
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go
@@ -0,0 +1,19 @@
+// Package cache defines the interface for a key-based data store.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package cache
+
+// Cache defines the interface for an opaquely-typed, key-based data store.
+//
+// The thread-safety of this interface is undefined and is dictated by
+// implementations.
+type Cache interface {
+ // Retrieve the value associated with the given key. The returned boolean
+ // indicates whether the cache held a value for the given key.
+ Get(k interface{}) (interface{}, bool)
+
+ // Store a value under the given key.
+ Put(k interface{}, v interface{})
+}
diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go
new file mode 100644
index 000000000..02ecb0a32
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go
@@ -0,0 +1,63 @@
+// Package lru implements [cache.Cache] with an LRU eviction policy.
+//
+// This implementation is NOT thread-safe.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package lru
+
+import (
+ "container/list"
+
+ "github.com/aws/smithy-go/container/private/cache"
+)
+
+// New creates a new LRU cache with the given capacity.
+func New(cap int) cache.Cache {
+ return &lru{
+ entries: make(map[interface{}]*list.Element, cap),
+ cap: cap,
+ mru: list.New(),
+ }
+}
+
+type lru struct {
+ entries map[interface{}]*list.Element
+ cap int
+
+ mru *list.List // least-recently used is at the back
+}
+
+type element struct {
+ key interface{}
+ value interface{}
+}
+
+func (l *lru) Get(k interface{}) (interface{}, bool) {
+ e, ok := l.entries[k]
+ if !ok {
+ return nil, false
+ }
+
+ l.mru.MoveToFront(e)
+ return e.Value.(*element).value, true
+}
+
+func (l *lru) Put(k interface{}, v interface{}) {
+ if len(l.entries) == l.cap {
+ l.evict()
+ }
+
+ ev := &element{
+ key: k,
+ value: v,
+ }
+ e := l.mru.PushFront(ev)
+ l.entries[k] = e
+}
+
+func (l *lru) evict() {
+ e := l.mru.Remove(l.mru.Back())
+ delete(l.entries, e.(*element).key)
+}
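+
+// Example (an illustrative sketch, not part of the upstream source): a
+// capacity-2 cache evicting its least-recently used entry.
+//
+//	c := lru.New(2)
+//	c.Put("a", 1)
+//	c.Put("b", 2)
+//	c.Get("a")    // "a" becomes most-recently used
+//	c.Put("c", 3) // evicts "b", the least-recently used entry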
diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go
new file mode 100644
index 000000000..a39b84a27
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/context/suppress_expired.go
@@ -0,0 +1,81 @@
+package context
+
+import "context"
+
+// valueOnlyContext provides a utility to preserve only the values of a
+// Context, suppressing any cancellation or deadline on that context from
+// being propagated downstream of this value.
+//
+// If preserveExpiredValues is false (default), and valuesCtx is canceled,
+// calls to look up values with the Value method will always return nil.
+// Setting preserveExpiredValues to true allows the valueOnlyContext to look up
+// values in valuesCtx even if valuesCtx is canceled.
+//
+// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility.
+// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go
+type valueOnlyContext struct {
+ context.Context
+
+ preserveExpiredValues bool
+ valuesCtx context.Context
+}
+
+var _ context.Context = (*valueOnlyContext)(nil)
+
+// Value looks up the key, returning its value. If configured to not preserve
+// values of expired context, and the wrapping context is canceled, nil will be
+// returned.
+func (v *valueOnlyContext) Value(key interface{}) interface{} {
+ if !v.preserveExpiredValues {
+ select {
+ case <-v.valuesCtx.Done():
+ return nil
+ default:
+ }
+ }
+
+ return v.valuesCtx.Value(key)
+}
+
+// WithSuppressCancel wraps the Context value, suppressing its deadline and
+// cancellation events being propagated downstream to consumer of the returned
+// context.
+//
+// By default the wrapped Context's values are available downstream until the
+// wrapped Context is canceled. Once the wrapped Context is canceled, the Value
+// method of the returned context will no longer look up any key, as the values
+// are then considered expired.
+//
+// To override this behavior, use WithPreserveExpiredValues on the Context
+// before it is wrapped by WithSuppressCancel. This will make the Context
+// returned by WithSuppressCancel allow lookup of expired values.
+func WithSuppressCancel(ctx context.Context) context.Context {
+ return &valueOnlyContext{
+ Context: context.Background(),
+ valuesCtx: ctx,
+
+ preserveExpiredValues: GetPreserveExpiredValues(ctx),
+ }
+}
+
+type preserveExpiredValuesKey struct{}
+
+// WithPreserveExpiredValues adds a Value to the Context if expired values
+// should be preserved, and looked up by a Context wrapped by
+// WithSuppressCancel.
+//
+// WithPreserveExpiredValues must be added as a value to a Context, before that
+// Context is wrapped by WithSuppressCancel.
+func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context {
+ return context.WithValue(ctx, preserveExpiredValuesKey{}, enable)
+}
+
+// GetPreserveExpiredValues looks up and returns the preserve-expired-values
+// setting in the context, returning true if enabled, false otherwise.
+func GetPreserveExpiredValues(ctx context.Context) bool {
+ v := ctx.Value(preserveExpiredValuesKey{})
+ if v != nil {
+ return v.(bool)
+ }
+ return false
+}
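+
+// Example (an illustrative sketch, not part of the upstream source): detaching
+// a context from cancellation while keeping its values readable after the
+// parent is canceled.
+//
+//	ctx = smithycontext.WithPreserveExpiredValues(ctx, true)
+//	detached := smithycontext.WithSuppressCancel(ctx)
+//	// detached.Value(key) still resolves even after ctx is canceled.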
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 000000000..87b0c74b7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 000000000..dec498c57
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+ UnmarshalDocument(interface{}) error
+ GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go
new file mode 100644
index 000000000..03055b7a1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/doc.go
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages, which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+// document types.
+package document
diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go
new file mode 100644
index 000000000..8f852d95c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/document.go
@@ -0,0 +1,153 @@
+package document
+
+import (
+ "fmt"
+ "math/big"
+ "strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int, uint, float, bool, string), big.Int,
+// and big.Float, as well as maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+// // Field is ignored
+// Field int `document:"-"`
+//
+// // Field object of key "myName"
+// Field int `document:"myName"`
+//
+// // Field object key of key "myName", and
+// // Field is omitted if the field is a zero value for the type.
+// Field int `document:"myName,omitempty"`
+//
+// // Field object key of "Field", and
+// // Field is omitted if the field is a zero value for the type.
+// Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless
+// any of the following conditions are met.
+//
+// - the field is not exported
+// - document field tag is "-"
+// - document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+type Marshaler interface {
+ MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document
+// into an empty interface the Unmarshaler will store one of these values:
+// bool, for boolean values
+// document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+// string, for string values
+// []interface{}, for array values
+// map[string]interface{}, for objects
+// nil, for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
+type Unmarshaler interface {
+ UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+ noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+ _, ok := x.(noSerde)
+ return ok
+}
+
+// Number is an arbitrary precision numerical value
+type Number string
+
+// String returns the number as a string.
+func (n Number) String() string {
+ return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+ return strconv.ParseInt(string(n), 10, bitSize)
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+ return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+ return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float, returned as a float64.
+func (n Number) Float32() (float64, error) {
+ return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return n.floatOfBitSize(64)
+}
+
+// floatOfBitSize parses the number as a float of the given bit size.
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+ return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+ f, ok := (&big.Float{}).SetString(string(n))
+ if !ok {
+ return nil, fmt.Errorf("failed to convert to big.Float")
+ }
+ return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+ f, ok := (&big.Int{}).SetString(string(n), 10)
+ if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Int")
+ }
+ return f, nil
+}
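+
+// Example (an illustrative sketch, not part of the upstream source): reading
+// an arbitrary-precision value out of a document Number.
+//
+//	n := document.Number("123456789012345678901234567890")
+//	if i, err := n.BigInt(); err == nil {
+//		fmt.Println(i) // exact, beyond the int64 range
+//	}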
diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go
new file mode 100644
index 000000000..046a7a765
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/errors.go
@@ -0,0 +1,75 @@
+package document
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+ Value string
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+ return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s",
+ e.Value, e.Type.String())
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a Smithy document to a Go value type.
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+ var msg string
+ if e.Type == nil {
+ msg = "cannot unmarshal to nil value"
+ } else if e.Type.Kind() != reflect.Ptr {
+ msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String())
+ } else {
+ msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String())
+ }
+
+ return fmt.Sprintf("unmarshal failed, %s", msg)
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a
+// Smithy document into a Go type. This is different from
+// UnmarshalTypeError in that it wraps the underlying error that occurred.
+type UnmarshalError struct {
+ Err error
+ Value string
+ Type reflect.Type
+}
+
+// Unwrap returns the underlying unmarshaling error
+func (e *UnmarshalError) Unwrap() error {
+ return e.Err
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *UnmarshalError) Error() string {
+ return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v",
+ e.Value, e.Type.String(), e.Err)
+}
+
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type.
+type InvalidMarshalError struct {
+ Message string
+}
+
+// Error returns the string representation of the error.
+// Satisfying the error interface.
+func (e *InvalidMarshalError) Error() string {
+ return fmt.Sprintf("marshal failed, %s", e.Message)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go
new file mode 100644
index 000000000..792fdfa08
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/doc.go
@@ -0,0 +1,4 @@
+// Package encoding provides utilities for encoding values for specific
+// document encodings.
+package encoding
diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go
new file mode 100644
index 000000000..2fdfb5225
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go
@@ -0,0 +1,40 @@
+package encoding
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// EncodeFloat encodes a float value as per the stdlib encoders for the JSON and XML protocols.
+// It encodes the float value into dst while attempting to conform to ES6 ToString for Numbers.
+//
+// Based on encoding/json floatEncoder from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func EncodeFloat(dst []byte, v float64, bits int) []byte {
+ if math.IsInf(v, 0) || math.IsNaN(v) {
+ panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits)))
+ }
+
+ abs := math.Abs(v)
+ fmt := byte('f')
+
+ if abs != 0 {
+ if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+ fmt = 'e'
+ }
+ }
+
+ dst = strconv.AppendFloat(dst, v, fmt, -1, bits)
+
+ if fmt == 'e' {
+ // clean up e-09 to e-9
+ n := len(dst)
+ if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+ dst[n-2] = dst[n-1]
+ dst = dst[:n-1]
+ }
+ }
+
+ return dst
+}
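+
+// Example (an illustrative sketch, not part of the upstream source): the
+// switch between fixed and exponent notation.
+//
+//	encoding.EncodeFloat(nil, 0.25, 64) // "0.25"
+//	encoding.EncodeFloat(nil, 1e21, 64) // "1e+21"
+//	encoding.EncodeFloat(nil, 1e-9, 64) // "1e-9" (e-09 is cleaned up to e-9)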
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
new file mode 100644
index 000000000..543e7cf03
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
@@ -0,0 +1,123 @@
+package httpbinding
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+const (
+ contentLengthHeader = "Content-Length"
+ floatNaN = "NaN"
+ floatInfinity = "Infinity"
+ floatNegInfinity = "-Infinity"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+ path, rawPath, pathBuffer []byte
+
+ query url.Values
+ header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in request. It assumes that
+// raw path contains no valuable information at this point, so it passes in path
+// as both path and raw path for subsequent transformations.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+ return NewEncoderWithRawPath(path, path, query, headers)
+}
+
+// NewEncoderWithRawPath creates a new encoder from the passed in request. All
+// query and header values will be added on top of the request's existing
+// values, overwriting duplicate values.
+func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) {
+ parseQuery, err := url.ParseQuery(query)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse query string: %w", err)
+ }
+
+ e := &Encoder{
+ path: []byte(path),
+ rawPath: []byte(rawPath),
+ query: parseQuery,
+ header: headers.Clone(),
+ }
+
+ return e, nil
+}
+
+// Encode applies the encoded path, query, and header values to the passed in
+// http.Request, returning the updated request.
+//
+// Because net/http requires `Content-Length` to be specified via
+// http.Request.ContentLength directly, Encode will check whether the header is
+// present, and if so will remove it and set the respective value on the
+// http.Request.
+//
+// Returns any error occurring during encoding.
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) {
+ req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath)
+ req.URL.RawQuery = e.query.Encode()
+
+ // net/http ignores Content-Length header and requires it to be set on http.Request
+ if v := e.header.Get(contentLengthHeader); len(v) > 0 {
+ iv, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ req.ContentLength = iv
+ e.header.Del(contentLengthHeader)
+ }
+
+ req.Header = e.header
+
+ return req, nil
+}
+
+// AddHeader returns a HeaderValue for appending to the given header name
+func (e *Encoder) AddHeader(key string) HeaderValue {
+ return newHeaderValue(e.header, key, true)
+}
+
+// SetHeader returns a HeaderValue for setting the given header name
+func (e *Encoder) SetHeader(key string) HeaderValue {
+ return newHeaderValue(e.header, key, false)
+}
+
+// Headers returns a Header used for encoding headers with the given prefix
+func (e *Encoder) Headers(prefix string) Headers {
+ return Headers{
+ header: e.header,
+ prefix: strings.TrimSpace(prefix),
+ }
+}
+
+// HasHeader returns whether a header with the specified key exists with one or
+// more values.
+func (e Encoder) HasHeader(key string) bool {
+ return len(e.header[key]) != 0
+}
+
+// SetURI returns a URIValue used for setting the given path key
+func (e *Encoder) SetURI(key string) URIValue {
+ return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key)
+}
+
+// SetQuery returns a QueryValue used for setting the given query key
+func (e *Encoder) SetQuery(key string) QueryValue {
+ return NewQueryValue(e.query, key, false)
+}
+
+// AddQuery returns a QueryValue used for appending the given query key
+func (e *Encoder) AddQuery(key string) QueryValue {
+ return NewQueryValue(e.query, key, true)
+}
+
+// HasQuery returns whether a query with the specified key exists with one or
+// more values.
+func (e *Encoder) HasQuery(key string) bool {
+ return len(e.query.Get(key)) != 0
+}
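+
+// Example (an illustrative sketch, not part of the upstream source): binding a
+// path label, a query parameter, and a header onto an http.Request.
+//
+//	req, _ := http.NewRequest("GET", "https://example.com/items/{id}", nil)
+//	encoder, _ := httpbinding.NewEncoder(req.URL.Path, req.URL.RawQuery, req.Header)
+//	if err := encoder.SetURI("id").String("abc/123"); err != nil {
+//		// handle error
+//	}
+//	encoder.SetQuery("limit").Integer(10)
+//	encoder.SetHeader("X-Example").String("value")
+//	req, _ = encoder.Encode(req)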
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
new file mode 100644
index 000000000..f9256e175
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
@@ -0,0 +1,122 @@
+package httpbinding
+
+import (
+ "encoding/base64"
+ "math"
+ "math/big"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// Headers is used to encode header keys using a provided prefix
+type Headers struct {
+ header http.Header
+ prefix string
+}
+
+// AddHeader returns a HeaderValue used to append values to prefix+key
+func (h Headers) AddHeader(key string) HeaderValue {
+ return h.newHeaderValue(key, true)
+}
+
+// SetHeader returns a HeaderValue used to set the value of prefix+key
+func (h Headers) SetHeader(key string) HeaderValue {
+ return h.newHeaderValue(key, false)
+}
+
+func (h Headers) newHeaderValue(key string, append bool) HeaderValue {
+ return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append)
+}
+
+// HeaderValue is used to encode values to an HTTP header
+type HeaderValue struct {
+ header http.Header
+ key string
+ append bool
+}
+
+func newHeaderValue(header http.Header, key string, append bool) HeaderValue {
+ return HeaderValue{header: header, key: strings.TrimSpace(key), append: append}
+}
+
+func (h HeaderValue) modifyHeader(value string) {
+ if h.append {
+ h.header[h.key] = append(h.header[h.key], value)
+ } else {
+ h.header[h.key] = append(h.header[h.key][:0], value)
+ }
+}
+
+// String encodes the value v as the header string value
+func (h HeaderValue) String(v string) {
+ h.modifyHeader(v)
+}
+
+// Byte encodes the value v as a header string value
+func (h HeaderValue) Byte(v int8) {
+ h.Long(int64(v))
+}
+
+// Short encodes the value v as a header string value
+func (h HeaderValue) Short(v int16) {
+ h.Long(int64(v))
+}
+
+// Integer encodes the value v as the header string value
+func (h HeaderValue) Integer(v int32) {
+ h.Long(int64(v))
+}
+
+// Long encodes the value v as the header string value
+func (h HeaderValue) Long(v int64) {
+ h.modifyHeader(strconv.FormatInt(v, 10))
+}
+
+// Boolean encodes the value v as a header string value
+func (h HeaderValue) Boolean(v bool) {
+ h.modifyHeader(strconv.FormatBool(v))
+}
+
+// Float encodes the value v as a header string value
+func (h HeaderValue) Float(v float32) {
+ h.float(float64(v), 32)
+}
+
+// Double encodes the value v as a header string value
+func (h HeaderValue) Double(v float64) {
+ h.float(v, 64)
+}
+
+func (h HeaderValue) float(v float64, bitSize int) {
+ switch {
+ case math.IsNaN(v):
+ h.String(floatNaN)
+ case math.IsInf(v, 1):
+ h.String(floatInfinity)
+ case math.IsInf(v, -1):
+ h.String(floatNegInfinity)
+ default:
+ h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize))
+ }
+}
+
+// BigInteger encodes the value v as a header string value
+func (h HeaderValue) BigInteger(v *big.Int) {
+ h.modifyHeader(v.String())
+}
+
+// BigDecimal encodes the value v as a header string value
+func (h HeaderValue) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ h.Long(i)
+ return
+ }
+ h.modifyHeader(v.Text('e', -1))
+}
+
+// Blob encodes the value v as a base64 header string value
+func (h HeaderValue) Blob(v []byte) {
+ encodeToString := base64.StdEncoding.EncodeToString(v)
+ h.modifyHeader(encodeToString)
+}
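A short sketch of how these header encoders are typically driven. It assumes the package's `Encoder` exposes `SetHeader`/`AddHeader` and an `Encode(*http.Request)` method as in upstream smithy-go; both live outside this file:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	enc, err := httpbinding.NewEncoder("/", "", http.Header{})
	if err != nil {
		panic(err)
	}

	enc.SetHeader("X-Meta-Count").Integer(42)      // X-Meta-Count: 42
	enc.AddHeader("X-Meta-Tag").String("a")        // appends
	enc.AddHeader("X-Meta-Tag").String("b")        // X-Meta-Tag: a, b
	enc.SetHeader("X-Checksum").Blob([]byte{0xde}) // base64 "3g=="

	// Encode is assumed to flush the staged values onto the request.
	req, err := enc.Encode(&http.Request{URL: &url.URL{}, Header: http.Header{}})
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Values("X-Meta-Tag")) // [a b]
}
```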
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
new file mode 100644
index 000000000..9ae308540
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
@@ -0,0 +1,108 @@
+package httpbinding
+
+import (
+ "bytes"
+ "fmt"
+)
+
+const (
+ uriTokenStart = '{'
+ uriTokenStop = '}'
+ uriTokenSkip = '+'
+)
+
+func bufCap(b []byte, n int) []byte {
+ if cap(b) < n {
+ return make([]byte, 0, n)
+ }
+
+ return b[0:0]
+}
+
+// replacePathElement replaces a single element in the path []byte.
+// Escape is used to control whether the value will be escaped using Amazon path escape style.
+func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) {
+ // search for "{<key>}". If not found, search for the greedy version "{<key>+}". If none are found, return error
+ fieldBuf = bufCap(fieldBuf, len(key)+2) // { <key> }
+ fieldBuf = append(fieldBuf, uriTokenStart)
+ fieldBuf = append(fieldBuf, key...)
+ fieldBuf = append(fieldBuf, uriTokenStop)
+
+ start := bytes.Index(path, fieldBuf)
+ encodeSep := true
+ if start < 0 {
+ fieldBuf = bufCap(fieldBuf, len(key)+3) // { <key> [+] }
+ fieldBuf = append(fieldBuf, uriTokenStart)
+ fieldBuf = append(fieldBuf, key...)
+ fieldBuf = append(fieldBuf, uriTokenSkip)
+ fieldBuf = append(fieldBuf, uriTokenStop)
+
+ start = bytes.Index(path, fieldBuf)
+ if start < 0 {
+ return path, fieldBuf, fmt.Errorf("invalid path index, start=%d. %s", start, path)
+ }
+ encodeSep = false
+ }
+ end := start + len(fieldBuf)
+
+ if escape {
+ val = EscapePath(val, encodeSep)
+ }
+
+ fieldBuf = bufCap(fieldBuf, len(val))
+ fieldBuf = append(fieldBuf, val...)
+
+ keyLen := end - start
+ valLen := len(fieldBuf)
+
+ if keyLen == valLen {
+ copy(path[start:], fieldBuf)
+ return path, fieldBuf, nil
+ }
+
+ newLen := len(path) + (valLen - keyLen)
+ if len(path) < newLen {
+ path = path[:cap(path)]
+ }
+ if cap(path) < newLen {
+ newURI := make([]byte, newLen)
+ copy(newURI, path)
+ path = newURI
+ }
+
+ // shift
+ copy(path[start+valLen:], path[end:])
+ path = path[:newLen]
+ copy(path[start:], fieldBuf)
+
+ return path, fieldBuf, nil
+}
+
+// EscapePath escapes part of a URL path in Amazon style.
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+var noEscape [256]bool
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
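Behavior sketch for EscapePath: encodeSep controls whether '/' is treated as data (escaped) or as a path separator (kept), which is what distinguishes a plain `{label}` placeholder from the greedy `{label+}` form above:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	fmt.Println(httpbinding.EscapePath("photos/2024 archive", true))  // photos%2F2024%20archive
	fmt.Println(httpbinding.EscapePath("photos/2024 archive", false)) // photos/2024%20archive
}
```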
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go
new file mode 100644
index 000000000..c2e7d0a20
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go
@@ -0,0 +1,107 @@
+package httpbinding
+
+import (
+ "encoding/base64"
+ "math"
+ "math/big"
+ "net/url"
+ "strconv"
+)
+
+// QueryValue is used to encode query key values
+type QueryValue struct {
+ query url.Values
+ key string
+ append bool
+}
+
+// NewQueryValue creates a new QueryValue which enables encoding
+// a query value into the given url.Values.
+func NewQueryValue(query url.Values, key string, append bool) QueryValue {
+ return QueryValue{
+ query: query,
+ key: key,
+ append: append,
+ }
+}
+
+func (qv QueryValue) updateKey(value string) {
+ if qv.append {
+ qv.query.Add(qv.key, value)
+ } else {
+ qv.query.Set(qv.key, value)
+ }
+}
+
+// Blob encodes v as a base64 query string value
+func (qv QueryValue) Blob(v []byte) {
+ encodeToString := base64.StdEncoding.EncodeToString(v)
+ qv.updateKey(encodeToString)
+}
+
+// Boolean encodes v as a query string value
+func (qv QueryValue) Boolean(v bool) {
+ qv.updateKey(strconv.FormatBool(v))
+}
+
+// String encodes v as a query string value
+func (qv QueryValue) String(v string) {
+ qv.updateKey(v)
+}
+
+// Byte encodes v as a query string value
+func (qv QueryValue) Byte(v int8) {
+ qv.Long(int64(v))
+}
+
+// Short encodes v as a query string value
+func (qv QueryValue) Short(v int16) {
+ qv.Long(int64(v))
+}
+
+// Integer encodes v as a query string value
+func (qv QueryValue) Integer(v int32) {
+ qv.Long(int64(v))
+}
+
+// Long encodes v as a query string value
+func (qv QueryValue) Long(v int64) {
+ qv.updateKey(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a query string value
+func (qv QueryValue) Float(v float32) {
+ qv.float(float64(v), 32)
+}
+
+// Double encodes v as a query string value
+func (qv QueryValue) Double(v float64) {
+ qv.float(v, 64)
+}
+
+func (qv QueryValue) float(v float64, bitSize int) {
+ switch {
+ case math.IsNaN(v):
+ qv.String(floatNaN)
+ case math.IsInf(v, 1):
+ qv.String(floatInfinity)
+ case math.IsInf(v, -1):
+ qv.String(floatNegInfinity)
+ default:
+ qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize))
+ }
+}
+
+// BigInteger encodes v as a query string value
+func (qv QueryValue) BigInteger(v *big.Int) {
+ qv.updateKey(v.String())
+}
+
+// BigDecimal encodes v as a query string value
+func (qv QueryValue) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ qv.Long(i)
+ return
+ }
+ qv.updateKey(v.Text('e', -1))
+}
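A quick illustration of the QueryValue semantics above (set vs. append, plus the non-finite float encodings). The `floatNaN`/`floatInfinity`/`floatNegInfinity` string constants are defined elsewhere in this package; their upstream values ("NaN", "Infinity", "-Infinity") are assumed here:

```go
package main

import (
	"fmt"
	"math"
	"net/url"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	q := url.Values{}

	httpbinding.NewQueryValue(q, "limit", false).Integer(25) // Set
	httpbinding.NewQueryValue(q, "tag", true).String("a")    // Add
	httpbinding.NewQueryValue(q, "tag", true).String("b")    // Add
	httpbinding.NewQueryValue(q, "score", false).Double(math.Inf(1))

	// Assuming floatInfinity == "Infinity" as upstream:
	fmt.Println(q.Encode()) // limit=25&score=Infinity&tag=a&tag=b
}
```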
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
new file mode 100644
index 000000000..f04e11984
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go
@@ -0,0 +1,111 @@
+package httpbinding
+
+import (
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// URIValue is used to encode named URI parameters
+type URIValue struct {
+ path, rawPath, buffer *[]byte
+
+ key string
+}
+
+func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue {
+ return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key}
+}
+
+func (u URIValue) modifyURI(value string) (err error) {
+ *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false)
+ if err != nil {
+ return err
+ }
+ *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true)
+ return err
+}
+
+// Boolean encodes v as a URI string value
+func (u URIValue) Boolean(v bool) error {
+ return u.modifyURI(strconv.FormatBool(v))
+}
+
+// String encodes v as a URI string value
+func (u URIValue) String(v string) error {
+ return u.modifyURI(v)
+}
+
+// Byte encodes v as a URI string value
+func (u URIValue) Byte(v int8) error {
+ return u.Long(int64(v))
+}
+
+// Short encodes v as a URI string value
+func (u URIValue) Short(v int16) error {
+ return u.Long(int64(v))
+}
+
+// Integer encodes v as a URI string value
+func (u URIValue) Integer(v int32) error {
+ return u.Long(int64(v))
+}
+
+// Long encodes v as a URI string value
+func (u URIValue) Long(v int64) error {
+ return u.modifyURI(strconv.FormatInt(v, 10))
+}
+
+// Float encodes v as a query string value
+func (u URIValue) Float(v float32) error {
+ return u.float(float64(v), 32)
+}
+
+// Double encodes v as a query string value
+func (u URIValue) Double(v float64) error {
+ return u.float(v, 64)
+}
+
+func (u URIValue) float(v float64, bitSize int) error {
+ switch {
+ case math.IsNaN(v):
+ return u.String(floatNaN)
+ case math.IsInf(v, 1):
+ return u.String(floatInfinity)
+ case math.IsInf(v, -1):
+ return u.String(floatNegInfinity)
+ default:
+ return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize))
+ }
+}
+
+// BigInteger encodes v as a query string value
+func (u URIValue) BigInteger(v *big.Int) error {
+ return u.modifyURI(v.String())
+}
+
+// BigDecimal encodes v as a query string value
+func (u URIValue) BigDecimal(v *big.Float) error {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ return u.Long(i)
+ }
+ return u.modifyURI(v.Text('e', -1))
+}
+
+// SplitURI parses a Smithy HTTP binding trait URI
+func SplitURI(uri string) (path, query string) {
+ queryStart := strings.IndexRune(uri, '?')
+ if queryStart == -1 {
+ path = uri
+ return path, query
+ }
+
+ path = uri[:queryStart]
+ if queryStart+1 >= len(uri) {
+ return path, query
+ }
+ query = uri[queryStart+1:]
+
+ return path, query
+}
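SplitURI behavior in a nutshell:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/encoding/httpbinding"
)

func main() {
	path, query := httpbinding.SplitURI("/buckets/{bucketName}?list-type=2")
	fmt.Println(path)  // /buckets/{bucketName}
	fmt.Println(query) // list-type=2

	// A trailing '?' yields an empty query rather than an out-of-range slice.
	path, query = httpbinding.SplitURI("/health?")
	fmt.Printf("%q %q\n", path, query) // "/health" ""
}
```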
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go
new file mode 100644
index 000000000..7a232f660
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/array.go
@@ -0,0 +1,35 @@
+package json
+
+import (
+ "bytes"
+)
+
+// Array represents the encoding of a JSON Array
+type Array struct {
+ w *bytes.Buffer
+ writeComma bool
+ scratch *[]byte
+}
+
+func newArray(w *bytes.Buffer, scratch *[]byte) *Array {
+ w.WriteRune(leftBracket)
+ return &Array{w: w, scratch: scratch}
+}
+
+// Value adds a new element to the JSON Array.
+// Returns a Value type that is used to encode
+// the array element.
+func (a *Array) Value() Value {
+ if a.writeComma {
+ a.w.WriteRune(comma)
+ } else {
+ a.writeComma = true
+ }
+
+ return newValue(a.w, a.scratch)
+}
+
+// Close encodes the end of the JSON Array
+func (a *Array) Close() {
+ a.w.WriteRune(rightBracket)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go
new file mode 100644
index 000000000..91044092a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/constants.go
@@ -0,0 +1,15 @@
+package json
+
+const (
+ leftBrace = '{'
+ rightBrace = '}'
+
+ leftBracket = '['
+ rightBracket = ']'
+
+ comma = ','
+ quote = '"'
+ colon = ':'
+
+ null = "null"
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go
new file mode 100644
index 000000000..7050c85b3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go
@@ -0,0 +1,139 @@
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// DiscardUnknownField discards unknown fields from a decoder body.
+// This function is useful while deserializing a JSON body with additional
+// unknown information that should be discarded.
+func DiscardUnknownField(decoder *json.Decoder) error {
+ // This deliberately does not share logic with CollectUnknownField, even
+ // though it could, because if we were to delegate to that then we'd incur
+ // extra allocations and general memory usage.
+ v, err := decoder.Token()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if _, ok := v.(json.Delim); ok {
+ for decoder.More() {
+ if err = DiscardUnknownField(decoder); err != nil {
+ return err
+ }
+ }
+ endToken, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if _, ok := endToken.(json.Delim); !ok {
+ return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v",
+ endToken, endToken)
+ }
+ }
+
+ return nil
+}
+
+// CollectUnknownField grabs the contents of unknown fields from the decoder body
+// and returns them as a byte slice. This is useful for skipping unknown fields without
+// completely discarding them.
+func CollectUnknownField(decoder *json.Decoder) ([]byte, error) {
+ result, err := collectUnknownField(decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ buff := bytes.NewBuffer(nil)
+ encoder := json.NewEncoder(buff)
+
+ if err := encoder.Encode(result); err != nil {
+ return nil, err
+ }
+
+ return buff.Bytes(), nil
+}
+
+func collectUnknownField(decoder *json.Decoder) (interface{}, error) {
+ // Grab the initial value. This could either be a concrete value like a string or a
+ // delimiter.
+ token, err := decoder.Token()
+ if err == io.EOF {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // If it's an array or object, we'll need to recurse.
+ delim, ok := token.(json.Delim)
+ if ok {
+ var result interface{}
+ if delim == '{' {
+ result, err = collectUnknownObject(decoder)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ result, err = collectUnknownArray(decoder)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Discard the closing token. decoder.Token handles checking for matching delimiters
+ if _, err := decoder.Token(); err != nil {
+ return nil, err
+ }
+ return result, nil
+ }
+
+ return token, nil
+}
+
+func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) {
+ // We need to create an empty array here instead of a nil array, since by getting
+ // into this function at all we necessarily have seen a non-nil list.
+ array := []interface{}{}
+
+ for decoder.More() {
+ value, err := collectUnknownField(decoder)
+ if err != nil {
+ return nil, err
+ }
+ array = append(array, value)
+ }
+
+ return array, nil
+}
+
+func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) {
+ object := make(map[string]interface{})
+
+ for decoder.More() {
+ key, err := collectUnknownField(decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ // Keys have to be strings, which is particularly important as the encoder
+ // won't accept a map with interface{} keys
+ stringKey, ok := key.(string)
+ if !ok {
+ return nil, fmt.Errorf("expected string key, found %T", key)
+ }
+
+ value, err := collectUnknownField(decoder)
+ if err != nil {
+ return nil, err
+ }
+
+ object[stringKey] = value
+ }
+
+ return object, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
new file mode 100644
index 000000000..8772953f1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go
@@ -0,0 +1,30 @@
+package json
+
+import (
+ "bytes"
+)
+
+// Encoder is a JSON encoder that supports construction of JSON values
+// using methods.
+type Encoder struct {
+ w *bytes.Buffer
+ Value
+}
+
+// NewEncoder returns a new JSON encoder
+func NewEncoder() *Encoder {
+ writer := bytes.NewBuffer(nil)
+ scratch := make([]byte, 64)
+
+ return &Encoder{w: writer, Value: newValue(writer, &scratch)}
+}
+
+// String returns the String output of the JSON encoder
+func (e Encoder) String() string {
+ return e.w.String()
+}
+
+// Bytes returns the []byte slice of the JSON encoder
+func (e Encoder) Bytes() []byte {
+ return e.w.Bytes()
+}
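To see how the pieces of this JSON encoder compose, a small usage sketch; Object, Key, and Array come from the other files of this package added by the same change:

```go
package main

import (
	"fmt"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	// Encoder embeds Value, so the root object is started directly on it.
	obj := enc.Object()
	obj.Key("name").String("restic")
	obj.Key("version").Integer(18)
	tags := obj.Key("tags").Array()
	tags.Value().String("backup")
	tags.Value().String("s3")
	tags.Close()
	obj.Close()

	fmt.Println(enc.String()) // {"name":"restic","version":18,"tags":["backup","s3"]}
}
```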
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go
new file mode 100644
index 000000000..d984d0cdc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/escape.go
@@ -0,0 +1,198 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet
+
+package json
+
+import (
+ "bytes"
+ "unicode/utf8"
+)
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+ ' ': true,
+ '!': true,
+ '"': false,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+ ',': true,
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+ ';': true,
+ '<': true,
+ '=': true,
+ '>': true,
+ '?': true,
+ '@': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '[': true,
+ '\\': false,
+ ']': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '{': true,
+ '|': true,
+ '}': true,
+ '~': true,
+ '\u007f': true,
+}
+
+// copied from Go 1.8 stdlib's encoding/json/#hex
+var hex = "0123456789abcdef"
+
+// escapeStringBytes escapes and writes the passed in string bytes to the dst
+// buffer
+//
+// Copied and modified from Go 1.8 stdlib's encoding/json/#encodeState.stringBytes
+func escapeStringBytes(e *bytes.Buffer, s []byte) {
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if safeSet[b] {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r.
+ // If escapeHTML is set, it also escapes <, >, and &
+ // because they can lead to security holes when
+ // user-controlled strings are rendered into JSON
+ // and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go
new file mode 100644
index 000000000..722346d03
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/object.go
@@ -0,0 +1,40 @@
+package json
+
+import (
+ "bytes"
+)
+
+// Object represents the encoding of a JSON Object type
+type Object struct {
+ w *bytes.Buffer
+ writeComma bool
+ scratch *[]byte
+}
+
+func newObject(w *bytes.Buffer, scratch *[]byte) *Object {
+ w.WriteRune(leftBrace)
+ return &Object{w: w, scratch: scratch}
+}
+
+func (o *Object) writeKey(key string) {
+ escapeStringBytes(o.w, []byte(key))
+ o.w.WriteRune(colon)
+}
+
+// Key adds the given named key to the JSON object.
+// Returns a Value encoder that should be used to encode
+// a JSON value type.
+func (o *Object) Key(name string) Value {
+ if o.writeComma {
+ o.w.WriteRune(comma)
+ } else {
+ o.writeComma = true
+ }
+ o.writeKey(name)
+ return newValue(o.w, o.scratch)
+}
+
+// Close encodes the end of the JSON Object
+func (o *Object) Close() {
+ o.w.WriteRune(rightBrace)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go
new file mode 100644
index 000000000..b41ff1e15
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/json/value.go
@@ -0,0 +1,149 @@
+package json
+
+import (
+ "bytes"
+ "encoding/base64"
+ "math/big"
+ "strconv"
+
+ "github.com/aws/smithy-go/encoding"
+)
+
+// Value represents a JSON Value type
+// JSON Value types: Object, Array, String, Number, Boolean, and Null
+type Value struct {
+ w *bytes.Buffer
+ scratch *[]byte
+}
+
+// newValue returns a new Value encoder
+func newValue(w *bytes.Buffer, scratch *[]byte) Value {
+ return Value{w: w, scratch: scratch}
+}
+
+// String encodes v as a JSON string
+func (jv Value) String(v string) {
+ escapeStringBytes(jv.w, []byte(v))
+}
+
+// Byte encodes v as a JSON number
+func (jv Value) Byte(v int8) {
+ jv.Long(int64(v))
+}
+
+// Short encodes v as a JSON number
+func (jv Value) Short(v int16) {
+ jv.Long(int64(v))
+}
+
+// Integer encodes v as a JSON number
+func (jv Value) Integer(v int32) {
+ jv.Long(int64(v))
+}
+
+// Long encodes v as a JSON number
+func (jv Value) Long(v int64) {
+ *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10)
+ jv.w.Write(*jv.scratch)
+}
+
+// ULong encodes v as a JSON number
+func (jv Value) ULong(v uint64) {
+ *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10)
+ jv.w.Write(*jv.scratch)
+}
+
+// Float encodes v as a JSON number
+func (jv Value) Float(v float32) {
+ jv.float(float64(v), 32)
+}
+
+// Double encodes v as a JSON number
+func (jv Value) Double(v float64) {
+ jv.float(v, 64)
+}
+
+func (jv Value) float(v float64, bits int) {
+ *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits)
+ jv.w.Write(*jv.scratch)
+}
+
+// Boolean encodes v as a JSON boolean
+func (jv Value) Boolean(v bool) {
+ *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v)
+ jv.w.Write(*jv.scratch)
+}
+
+// Base64EncodeBytes writes v as a base64 value in JSON string
+func (jv Value) Base64EncodeBytes(v []byte) {
+ encodeByteSlice(jv.w, (*jv.scratch)[:0], v)
+}
+
+// Write writes v directly to the JSON document
+func (jv Value) Write(v []byte) {
+ jv.w.Write(v)
+}
+
+// Array returns a new Array encoder
+func (jv Value) Array() *Array {
+ return newArray(jv.w, jv.scratch)
+}
+
+// Object returns a new Object encoder
+func (jv Value) Object() *Object {
+ return newObject(jv.w, jv.scratch)
+}
+
+// Null encodes a null JSON value
+func (jv Value) Null() {
+ jv.w.WriteString(null)
+}
+
+// BigInteger encodes v as JSON value
+func (jv Value) BigInteger(v *big.Int) {
+ jv.w.Write([]byte(v.Text(10)))
+}
+
+// BigDecimal encodes v as JSON value
+func (jv Value) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ jv.Long(i)
+ return
+ }
+ // TODO: Should this try to match ES6 ToString similar to stdlib JSON?
+ jv.w.Write([]byte(v.Text('e', -1)))
+}
+
+// Based on encoding/json encodeByteSlice from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) {
+ if v == nil {
+ w.WriteString(null)
+ return
+ }
+
+ w.WriteRune(quote)
+
+ encodedLen := base64.StdEncoding.EncodedLen(len(v))
+ if encodedLen <= len(scratch) {
+ // If the encoded bytes fit in e.scratch, avoid an extra
+ // allocation and use the cheaper Encoding.Encode.
+ dst := scratch[:encodedLen]
+ base64.StdEncoding.Encode(dst, v)
+ w.Write(dst)
+ } else if encodedLen <= 1024 {
+ // The encoded bytes are short enough to allocate for, and
+ // Encoding.Encode is still cheaper.
+ dst := make([]byte, encodedLen)
+ base64.StdEncoding.Encode(dst, v)
+ w.Write(dst)
+ } else {
+ // The encoded bytes are too long to cheaply allocate, and
+ // Encoding.Encode is no longer noticeably cheaper.
+ enc := base64.NewEncoder(base64.StdEncoding, w)
+ enc.Write(v)
+ enc.Close()
+ }
+
+ w.WriteRune(quote)
+}
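A short sketch exercising the less common Value methods above: BigDecimal's exact-integer collapse into Long, arbitrary-precision integers, base64 blobs, and null:

```go
package main

import (
	"fmt"
	"math/big"

	smithyjson "github.com/aws/smithy-go/encoding/json"
)

func main() {
	enc := smithyjson.NewEncoder()

	arr := enc.Array()
	arr.Value().BigInteger(big.NewInt(12345678901234567)) // arbitrary precision integer
	arr.Value().BigDecimal(big.NewFloat(3))               // exact int64, collapses to 3
	arr.Value().Base64EncodeBytes([]byte("hi"))           // "aGk="
	arr.Value().Null()
	arr.Close()

	fmt.Println(enc.String()) // [12345678901234567,3,"aGk=",null]
}
```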
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 000000000..508f3c997
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+ Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of a XML array type
+type Array struct {
+ w writer
+ scratch *[]byte
+
+ // member start element is the array member wrapper start element
+ memberStartElement StartElement
+
+ // isFlattened indicates if the array is a flattened array.
+ isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element and the array start element.
+// It takes in an isFlattened bool, indicating whether the array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+//
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+ var memberWrapper = memberStartElement
+ if isFlattened {
+ memberWrapper = arrayStartElement
+ }
+
+ return &Array{
+ w: w,
+ scratch: scratch,
+ memberStartElement: memberWrapper,
+ isFlattened: isFlattened,
+ }
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+ v := newValue(a.w, a.scratch, a.memberStartElement)
+ v.isFlattened = a.isFlattened
+ return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 000000000..ccee90a63
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+ leftAngleBracket = '<'
+ rightAngleBracket = '>'
+ forwardSlash = '/'
+ colon = ':'
+ equals = '='
+ quote = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 000000000..f9200093e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance with our design to delegate to
+shape serializer function in which a xml.Value will be passed around.
+
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
+
+Member Element
+
+Member element should be used to encode xml shapes into xml elements except for flattened xml shapes. Member element
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened element should be used to encode shapes marked with flattened trait into xml elements. Flattened element
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on value such as String(), Long() etc; auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods, Array() and ArrayWithCustomName(), which facilitate array member wrapping.
+By default, wrapped array members are wrapped with a `member` named start element.
+
+ <wrappedarray><member>apple</member><member>tree</member></wrappedarray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as wrapper for array elements.
+
+ <flattenedarray>apple</flattenedarray><flattenedarray>tree</flattenedarray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped map has an Entry() method, which facilitates map entry wrapping.
+By default, wrapped map entries are wrapped with an `entry` named start element.
+
+ <wrappedmap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedmap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as wrapper for map entry elements.
+
+ <flattenedmap><key>apple</key><value>tree</value></flattenedmap><flattenedmap><key>snow</key><value>ice</value></flattenedmap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 000000000..ae84e7999
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+ Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+ Name Name
+ Value string
+}
+
+/*
+NewAttribute returns an attribute.
+It takes in a local name (aka attribute name), and a value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+ return Attr{
+ Name: Name{
+ Local: local,
+ },
+ Value: value,
+ }
+}
+
+/*
+NewNamespaceAttribute returns a namespace attribute.
+It takes in a local name (aka attribute name), and a value
+representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of namespace
+prefix.
+
+For creating a namespace attribute representing
+`xmlns:prefix="http://example.com"`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+ attr := NewAttribute(local, value)
+
+ // default name space identifier
+ attr.Name.Space = "xmlns"
+ return attr
+}
+
+// A StartElement represents an XML start element.
+type StartElement struct {
+ Name Name
+ Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+ attrs := make([]Attr, len(e.Attr))
+ copy(attrs, e.Attr)
+ e.Attr = attrs
+ return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+ return EndElement{e.Name}
+}
+
+// returns true if start element local name is empty
+func (e StartElement) isZero() bool {
+ return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+ Name Name
+}
+
+// returns true if end element local name is empty
+func (e EndElement) isZero() bool {
+ return len(e.Name.Local) == 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
new file mode 100644
index 000000000..16fb3dddb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
@@ -0,0 +1,51 @@
+package xml
+
+// writer interface used by the xml encoder to write an encoded xml
+// document in a writer.
+type writer interface {
+
+ // Write takes in a byte slice and returns number of bytes written and error
+ Write(p []byte) (n int, err error)
+
+ // WriteRune takes in a rune and returns number of bytes written and error
+ WriteRune(r rune) (n int, err error)
+
+ // WriteString takes in a string and returns number of bytes written and error
+ WriteString(s string) (n int, err error)
+
+ // String method returns a string
+ String() string
+
+ // Bytes return a byte slice.
+ Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+ w writer
+ scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+ scratch := make([]byte, 64)
+
+ return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+ return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+ return e.w.Bytes()
+}
+
+// RootElement builds a root element encoding
+// It writes its start element tag. The value should be closed.
+func (e Encoder) RootElement(element StartElement) Value {
+ return newValue(e.w, e.scratch, element)
+}
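A usage sketch: bytes.Buffer satisfies the writer interface above, and the Value returned by RootElement composes with the member/array/map encoders from this package:

```go
package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)
	enc := smithyxml.NewEncoder(buf)

	root := enc.RootElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Person"},
	})
	root.MemberElement(smithyxml.StartElement{
		Name: smithyxml.Name{Local: "Name"},
	}).String("Jane") // simple types auto-close the member element

	root.Close()

	fmt.Println(enc.String()) // <Person><Name>Jane</Name></Person>
}
```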
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
new file mode 100644
index 000000000..f3db6ccca
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
@@ -0,0 +1,51 @@
+package xml
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+ Code string
+ Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+ if noErrorWrapping {
+ var errResponse noWrappedErrorResponse
+ if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+ }
+ return ErrorComponents{
+ Code: errResponse.Code,
+ Message: errResponse.Message,
+ }, nil
+ }
+
+ var errResponse wrappedErrorResponse
+ if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+ return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+ }
+ return ErrorComponents{
+ Code: errResponse.Code,
+ Message: errResponse.Message,
+ }, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal Error element wrapping the code and message
+type noWrappedErrorResponse struct {
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body with
+// the code and message wrapped within an internal Error element
+type wrappedErrorResponse struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
new file mode 100644
index 000000000..1c5479af6
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+import (
+ "unicode/utf8"
+)
+
+// Copied from Go 1.14 stdlib's encoding/xml
+var (
+ escQuot = []byte("&#34;") // shorter than "&quot;"
+ escApos = []byte("&#39;") // shorter than "&apos;"
+ escAmp = []byte("&amp;")
+ escLT = []byte("&lt;")
+ escGT = []byte("&gt;")
+ escTab = []byte("&#x9;")
+ escNL = []byte("&#xA;")
+ escCR = []byte("&#xD;")
+ escFFFD = []byte("\uFFFD") // Unicode replacement character
+
+ // Additional Escapes
+ escNextLine = []byte("&#x85;")
+ escLS = []byte("&#x2028;")
+)
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+ return r == 0x09 ||
+ r == 0x0A ||
+ r == 0x0D ||
+ r >= 0x20 && r <= 0xD7FF ||
+ r >= 0xE000 && r <= 0xFFFD ||
+ r >= 0x10000 && r <= 0x10FFFF
+}
+
+// TODO: When do we need to escape the string?
+// Based on encoding/xml escapeString from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeString(e writer, s string) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRuneInString(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = escQuot
+ case '\'':
+ esc = escApos
+ case '&':
+ esc = escAmp
+ case '<':
+ esc = escLT
+ case '>':
+ esc = escGT
+ case '\t':
+ esc = escTab
+ case '\n':
+ esc = escNL
+ case '\r':
+ esc = escCR
+ case '\u0085':
+ // Not escaped by stdlib
+ esc = escNextLine
+ case '\u2028':
+ // Not escaped by stdlib
+ esc = escLS
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = escFFFD
+ break
+ }
+ continue
+ }
+ e.WriteString(s[last : i-width])
+ e.Write(esc)
+ last = i
+ }
+ e.WriteString(s[last:])
+}
+
+// escapeText writes to e the properly escaped XML equivalent
+// of the plain text data s. Unlike the stdlib version, newline
+// characters are always escaped.
+//
+// Based on encoding/xml escapeText from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeText(e writer, s []byte) {
+ var esc []byte
+ last := 0
+ for i := 0; i < len(s); {
+ r, width := utf8.DecodeRune(s[i:])
+ i += width
+ switch r {
+ case '"':
+ esc = escQuot
+ case '\'':
+ esc = escApos
+ case '&':
+ esc = escAmp
+ case '<':
+ esc = escLT
+ case '>':
+ esc = escGT
+ case '\t':
+ esc = escTab
+ case '\n':
+ // This always escapes newline, which is different from stdlib's optional
+ // escaping of newlines.
+ esc = escNL
+ case '\r':
+ esc = escCR
+ case '\u0085':
+ // Not escaped by stdlib
+ esc = escNextLine
+ case '\u2028':
+ // Not escaped by stdlib
+ esc = escLS
+ default:
+ if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+ esc = escFFFD
+ break
+ }
+ continue
+ }
+ e.Write(s[last : i-width])
+ e.Write(esc)
+ last = i
+ }
+ e.Write(s[last:])
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
new file mode 100644
index 000000000..e42858965
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
@@ -0,0 +1,53 @@
+package xml
+
+// mapEntryWrapper is the default member wrapper start element for XML Map entry
+var mapEntryWrapper = StartElement{
+ Name: Name{Local: "entry"},
+}
+
+// Map represents the encoding of a XML map type
+type Map struct {
+ w writer
+ scratch *[]byte
+
+ // member start element is the map entry wrapper start element
+ memberStartElement StartElement
+
+ // isFlattened returns true if the map is a flattened map
+ isFlattened bool
+}
+
+// newMap returns a map encoder which sets the default map
+// entry wrapper to `entry`.
+//
+// A map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><entry><key>abc</key><value>123</value></entry></someMap>`.
+func newMap(w writer, scratch *[]byte) *Map {
+ return &Map{
+ w: w,
+ scratch: scratch,
+ memberStartElement: mapEntryWrapper,
+ }
+}
+
+// newFlattenedMap returns a map encoder which sets the map
+// entry wrapper to the passed in `memberWrapper`.
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc</key><value>123</value></someMap>`.
+func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+ return &Map{
+ w: w,
+ scratch: scratch,
+ memberStartElement: memberWrapper,
+ isFlattened: true,
+ }
+}
+
+// Entry returns a Value encoder with map's element.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+ v := newValue(m.w, m.scratch, m.memberStartElement)
+ v.isFlattened = m.isFlattened
+ return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
new file mode 100644
index 000000000..09434b2c0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
@@ -0,0 +1,302 @@
+package xml
+
+import (
+ "encoding/base64"
+ "fmt"
+ "math/big"
+ "strconv"
+
+ "github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+ w writer
+ scratch *[]byte
+
+ // xml start element is the associated start element for the Value
+ startElement StartElement
+
+ // indicates if the Value represents a flattened shape
+ isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+ return Value{
+ w: w,
+ scratch: scratch,
+ startElement: startElement,
+ }
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+ writeStartElement(w, startElement)
+ return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespace, attributes in start element.
+func writeStartElement(w writer, el StartElement) error {
+ if el.isZero() {
+ return fmt.Errorf("xml start element cannot be nil")
+ }
+
+ w.WriteRune(leftAngleBracket)
+
+ if len(el.Name.Space) != 0 {
+ escapeString(w, el.Name.Space)
+ w.WriteRune(colon)
+ }
+ escapeString(w, el.Name.Local)
+ for _, attr := range el.Attr {
+ w.WriteRune(' ')
+ writeAttribute(w, &attr)
+ }
+
+ w.WriteRune(rightAngleBracket)
+ return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+ // if local, space both are not empty
+ if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+ escapeString(w, attr.Name.Space)
+ w.WriteRune(colon)
+ }
+
+ // if prefix is empty, the default `xmlns` space should be used as prefix.
+ if len(attr.Name.Local) == 0 {
+ attr.Name.Local = attr.Name.Space
+ }
+
+ escapeString(w, attr.Name.Local)
+ w.WriteRune(equals)
+ w.WriteRune(quote)
+ escapeString(w, attr.Value)
+ w.WriteRune(quote)
+}
+
+// writeEndElement takes in a end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+ if el.isZero() {
+ return fmt.Errorf("xml end element cannot be nil")
+ }
+
+ w.WriteRune(leftAngleBracket)
+ w.WriteRune(forwardSlash)
+
+ if len(el.Name.Space) != 0 {
+ escapeString(w, el.Name.Space)
+ w.WriteRune(colon)
+ }
+ escapeString(w, el.Name.Local)
+ w.WriteRune(rightAngleBracket)
+
+ return nil
+}
+
+// String encodes v as a XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) String(v string) {
+ escapeString(xv.w, v)
+ xv.Close()
+}
+
+// Byte encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+ xv.Long(int64(v))
+}
+
+// Short encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+ xv.Long(int64(v))
+}
+
+// Integer encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+ xv.Long(int64(v))
+}
+
+// Long encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+ *xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+ xv.w.Write(*xv.scratch)
+
+ xv.Close()
+}
+
+// Float encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+ xv.float(float64(v), 32)
+ xv.Close()
+}
+
+// Double encodes v as a XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+ xv.float(v, 64)
+ xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+ *xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+ xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as a XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+ *xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+ xv.w.Write(*xv.scratch)
+
+ xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+ encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+ xv.Close()
+}
+
+// BigInteger encodes v big.Int as XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+ xv.w.Write([]byte(v.Text(10)))
+ xv.Close()
+}
+
+// BigDecimal encodes v big.Float as XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+ if i, accuracy := v.Int64(); accuracy == big.Exact {
+ xv.Long(i)
+ return
+ }
+
+ xv.w.Write([]byte(v.Text('e', -1)))
+ xv.Close()
+}
+
+// Write writes v directly to the xml document
+// if escapeXMLText is set to true, write will escape text.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+ // escape and write xml text
+ if escapeXMLText {
+ escapeText(xv.w, v)
+ } else {
+ // write xml directly
+ xv.w.Write(v)
+ }
+
+ xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// Member Element method should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+ return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by the FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+ v := newFlattenedValue(xv.w, xv.scratch, element)
+ v.isFlattened = true
+ return v
+}
+
+// Array returns an array encoder. By default, the members of the array are
+// wrapped with the `<member>` element tag.
+// If value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+ return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes a named start element as an argument; the named start element will be used to wrap xml array entries.
+For example: `<someList><customName>entry1</customName></someList>`.
+Here the `customName` named start element is wrapped around each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+ return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with the `<entry>` element tag.
+
+If value is marked as flattened, the start element is used to wrap the entry instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+ // flattened map
+ if xv.isFlattened {
+ return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+ }
+
+ // un-flattened map
+ return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is modified copy of json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+ if v == nil {
+ return
+ }
+
+ encodedLen := base64.StdEncoding.EncodedLen(len(v))
+ if encodedLen <= len(scratch) {
+ // If the encoded bytes fit in e.scratch, avoid an extra
+ // allocation and use the cheaper Encoding.Encode.
+ dst := scratch[:encodedLen]
+ base64.StdEncoding.Encode(dst, v)
+ w.Write(dst)
+ } else if encodedLen <= 1024 {
+ // The encoded bytes are short enough to allocate for, and
+ // Encoding.Encode is still cheaper.
+ dst := make([]byte, encodedLen)
+ base64.StdEncoding.Encode(dst, v)
+ w.Write(dst)
+ } else {
+ // The encoded bytes are too long to cheaply allocate, and
+ // Encoding.Encode is no longer noticeably cheaper.
+ enc := base64.NewEncoder(base64.StdEncoding, w)
+ enc.Write(v)
+ enc.Close()
+ }
+}
+
+// IsFlattened returns true if value is for flattened shape.
+func (xv Value) IsFlattened() bool {
+ return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+ writeEndElement(xv.w, xv.startElement.End())
+}
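A sketch of wrapped-list encoding through Value.Array(); a flattened list would instead come from FlattenedElement, which skips the wrapper element entirely:

```go
package main

import (
	"bytes"
	"fmt"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	buf := bytes.NewBuffer(nil)
	enc := smithyxml.NewEncoder(buf)

	root := enc.RootElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Doc"}})

	// Wrapped list: members are wrapped in the default <member> element.
	list := root.MemberElement(smithyxml.StartElement{Name: smithyxml.Name{Local: "Items"}})
	arr := list.Array()
	arr.Member().String("a")
	arr.Member().String("b")
	list.Close()

	root.Close()
	fmt.Println(enc.String())
	// <Doc><Items><member>a</member><member>b</member></Items></Doc>
}
```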
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
new file mode 100644
index 000000000..dc4eebdff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
@@ -0,0 +1,154 @@
+package xml
+
+import (
+ "encoding/xml"
+ "fmt"
+ "strings"
+)
+
+// NodeDecoder is an XML decoder wrapper that is responsible for decoding
+// a single XML Node element and its nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+ Decoder *xml.Decoder
+ StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized NodeDecoder
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+ return NodeDecoder{
+ Decoder: decoder,
+ StartEl: startEl,
+ }
+}
+
+// Token on a NodeDecoder returns an xml.StartElement. It returns a boolean that indicates whether
+// the token is the node decoder's end node token, and an error which indicates any error
+// that occurred while retrieving the start element.
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+ for {
+ token, e := d.Decoder.Token()
+ if e != nil {
+ return t, done, e
+ }
+
+ // check if we reach end of the node being decoded
+ if el, ok := token.(xml.EndElement); ok {
+ return t, el == d.StartEl.End(), err
+ }
+
+ if t, ok := token.(xml.StartElement); ok {
+ return restoreAttrNamespaces(t), false, err
+ }
+
+ // skip token if it is a comment or preamble or empty space value due to indentation
+ // or if it's a value and is not expected
+ }
+}
+
+// restoreAttrNamespaces update XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+ if len(node.Attr) == 0 {
+ return node
+ }
+
+ // Generate a mapping of XML namespace values to their short names.
+ ns := map[string]string{}
+ for _, a := range node.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ break
+ }
+ }
+
+ for i, a := range node.Attr {
+ if a.Name.Space == "xmlns" {
+ continue
+ }
+ // By default, xml.Decoder will fully resolve these namespaces. So if you had <foo xmlns:bar="baz" bar:bin="hi"/>
+ // then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to
+ // continue to resolve as `bar` so we can easily identify it later on.
+ if v, ok := ns[node.Attr[i].Name.Space]; ok {
+ node.Attr[i].Name.Space = v
+ }
+ }
+ return node
+}
+
+// GetElement looks for the given tag name at the current level, returning the element if found and
+// skipping over non-matching elements. Returns an error if the node is not found, or if an error occurs while walking
+// the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+ for {
+ token, done, err := d.Token()
+ if err != nil {
+ return t, err
+ }
+ if done {
+ return t, fmt.Errorf("%s node not found", name)
+ }
+ switch {
+ case strings.EqualFold(name, token.Name.Local):
+ return token, nil
+ default:
+ err = d.Decoder.Skip()
+ if err != nil {
+ return t, err
+ }
+ }
+ }
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+ t, e := d.Decoder.Token()
+ if e != nil {
+ return c, e
+ }
+
+ endElement := d.StartEl.End()
+
+ switch ev := t.(type) {
+ case xml.CharData:
+ c = ev.Copy()
+ case xml.EndElement: // end tag or self-closing
+ if ev == endElement {
+ return []byte{}, err
+ }
+ return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+ default:
+ return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+ }
+
+ t, e = d.Decoder.Token()
+ if e != nil {
+ return c, e
+ }
+
+ if ev, ok := t.(xml.EndElement); ok {
+ if ev == endElement {
+ return c, err
+ }
+ }
+
+ return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful in fetching the start element of an XML response and ignoring the
+// comments and preamble.
+func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) {
+ for {
+ t, e := decoder.Token()
+ if e != nil {
+ return startElement, e
+ }
+
+ if startElement, ok := t.(xml.StartElement); ok {
+ return startElement, err
+ }
+ }
+}
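A decoding sketch tying FetchRootElement, NodeDecoder, GetElement, and Value together:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	smithyxml "github.com/aws/smithy-go/encoding/xml"
)

func main() {
	body := `<?xml version="1.0"?><Person><Name>Jane</Name></Person>`
	decoder := xml.NewDecoder(strings.NewReader(body))

	// FetchRootElement skips the preamble and returns <Person>.
	root, err := smithyxml.FetchRootElement(decoder)
	if err != nil {
		panic(err)
	}

	nd := smithyxml.WrapNodeDecoder(decoder, root)
	nameEl, err := nd.GetElement("Name")
	if err != nil {
		panic(err)
	}

	value, err := smithyxml.WrapNodeDecoder(decoder, nameEl).Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(value)) // Jane
}
```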
diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
new file mode 100644
index 000000000..f778272be
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
@@ -0,0 +1,23 @@
+package transport
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/aws/smithy-go"
+)
+
+// Endpoint is the endpoint object returned by Endpoint resolution V2
+type Endpoint struct {
+ // The complete URL minimally specifying the scheme and host.
+ // May optionally specify the port and base path component.
+ URI url.URL
+
+ // An optional set of headers to be sent using transport layer headers.
+ Headers http.Header
+
+ // A grab-bag property map of endpoint attributes. The
+ // values present here are subject to change, or to being added/removed at any
+ // time.
+ Properties smithy.Properties
+}
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
new file mode 100644
index 000000000..e24e190dc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
@@ -0,0 +1,4 @@
+// Package rulesfn provides endpoint rule functions for evaluating endpoint
+// resolution rules.
+
+package rulesfn
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
new file mode 100644
index 000000000..5cf4a7b02
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
@@ -0,0 +1,25 @@
+package rulesfn
+
+// SubString returns the substring of the input provided. If the start or stop
+// indexes are not valid for the input, or the input contains non-ASCII
+// characters, nil is returned.
+func SubString(input string, start, stop int, reverse bool) *string {
+ if start < 0 || stop < 1 || start >= stop || len(input) < stop {
+ return nil
+ }
+
+ for _, r := range input {
+ if r > 127 {
+ return nil
+ }
+ }
+
+ if !reverse {
+ v := input[start:stop]
+ return &v
+ }
+
+ rStart := len(input) - stop
+ rStop := len(input) - start
+ return SubString(input, rStart, rStop, false)
+}
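Behavior sketch for SubString, including the reverse mode that indexes from the end of the input:

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/endpoints/private/rulesfn"
)

func main() {
	if v := rulesfn.SubString("us-west-2", 0, 2, false); v != nil {
		fmt.Println(*v) // us
	}
	// reverse=true indexes from the end of the string instead.
	if v := rulesfn.SubString("us-west-2", 0, 1, true); v != nil {
		fmt.Println(*v) // 2
	}
	// Non-ASCII input or out-of-range indexes yield nil.
	fmt.Println(rulesfn.SubString("ü", 0, 1, false) == nil) // true
}
```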
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
new file mode 100644
index 000000000..0c1154127
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
@@ -0,0 +1,130 @@
+package rulesfn
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// IsValidHostLabel reports whether the input is a single valid [RFC 1123] host
+// label. If allowSubDomains is true, validation will include nested
+// host labels. Returns false if the input is not a valid host label.
+//
+// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt
+func IsValidHostLabel(input string, allowSubDomains bool) bool {
+ var labels []string
+ if allowSubDomains {
+ labels = strings.Split(input, ".")
+ } else {
+ labels = []string{input}
+ }
+
+ for _, label := range labels {
+ if !smithyhttp.ValidHostLabel(label) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ParseURL returns a [URL] if the provided string could be parsed. Returns nil
+// if the string could not be parsed.
+//
+// If the input URL string contains an IP6 address with a zone index, the
+// returned [URL.Authority] value will contain the percent escaped (%)
+// zone index separator.
+func ParseURL(input string) *URL {
+ u, err := url.Parse(input)
+ if err != nil {
+ return nil
+ }
+
+ if u.RawQuery != "" {
+ return nil
+ }
+
+ if u.Scheme != "http" && u.Scheme != "https" {
+ return nil
+ }
+
+ normalizedPath := u.Path
+ if !strings.HasPrefix(normalizedPath, "/") {
+ normalizedPath = "/" + normalizedPath
+ }
+ if !strings.HasSuffix(normalizedPath, "/") {
+ normalizedPath = normalizedPath + "/"
+ }
+
+ // IP6 hosts may have zone indexes that need to be escaped to be valid in a
+ // URI. The Go URL parser will unescape the `%25` into `%`. This needs to
+ // be reverted since the returned URL will be used in string builders.
+ authority := strings.ReplaceAll(u.Host, "%", "%25")
+
+ return &URL{
+ Scheme: u.Scheme,
+ Authority: authority,
+ Path: u.Path,
+ NormalizedPath: normalizedPath,
+ IsIp: net.ParseIP(hostnameWithoutZone(u)) != nil,
+ }
+}
+
+// URL provides the structure describing the parts of a parsed URL returned by
+// [ParseURL].
+type URL struct {
+ Scheme string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ Authority string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2
+ Path string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3
+ IsIp bool
+}
+
+// URIEncode returns an percent-encoded [RFC3986 section 2.1] version of the
+// input string.
+//
+// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1
+func URIEncode(input string) string {
+ var output strings.Builder
+ for _, c := range []byte(input) {
+ if validPercentEncodedChar(c) {
+ output.WriteByte(c)
+ continue
+ }
+
+ fmt.Fprintf(&output, "%%%X", c)
+ }
+
+ return output.String()
+}
+
+func validPercentEncodedChar(c byte) bool {
+ return (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '-' || c == '_' || c == '.' || c == '~'
+}
+
+// hostname implements u.Hostname() but strips the ipv6 zone ID (if present)
+// such that net.ParseIP can still recognize IPv6 addresses with zone IDs.
+//
+// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take
+// that package as a dependency yet due to our min go version (1.15, netip
+// starts in 1.18). When we align with go runtime deprecation policy in
+// 10/2023, we can remove this.
+func hostnameWithoutZone(u *url.URL) string {
+ full := u.Hostname()
+
+ // this more or less mimics the internals of net/ (see unexported
+ // splitHostZone in that source) but throws the zone away because we don't
+ // need it
+ if i := strings.LastIndex(full, "%"); i > -1 {
+ return full[:i]
+ }
+ return full
+}
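
A short usage sketch of ParseURL and URIEncode (illustrative only; expected outputs in comments):

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/endpoints/private/rulesfn"
)

func main() {
	if u := rulesfn.ParseURL("https://example.com/path"); u != nil {
		fmt.Println(u.Scheme)         // https
		fmt.Println(u.Authority)      // example.com
		fmt.Println(u.NormalizedPath) // /path/
		fmt.Println(u.IsIp)           // false
	}

	// URLs with query strings are rejected by the endpoint rules engine.
	fmt.Println(rulesfn.ParseURL("https://example.com/?k=v")) // <nil>

	// Everything outside the RFC 3986 unreserved set is percent-encoded.
	fmt.Println(rulesfn.URIEncode("a b/c")) // a%20b%2Fc
}
```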
diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go
new file mode 100644
index 000000000..d6948d020
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/errors.go
@@ -0,0 +1,137 @@
+package smithy
+
+import "fmt"
+
+// APIError provides the generic API and protocol agnostic error type all SDK
+// generated exception types will implement.
+type APIError interface {
+ error
+
+ // ErrorCode returns the error code for the API exception.
+ ErrorCode() string
+ // ErrorMessage returns the error message for the API exception.
+ ErrorMessage() string
+ // ErrorFault returns the fault for the API exception.
+ ErrorFault() ErrorFault
+}
+
+// GenericAPIError provides a generic concrete API error type that SDKs can use
+// to deserialize error responses into. Should be used for unmodeled or untyped
+// errors.
+type GenericAPIError struct {
+ Code string
+ Message string
+ Fault ErrorFault
+}
+
+// ErrorCode returns the error code for the API exception.
+func (e *GenericAPIError) ErrorCode() string { return e.Code }
+
+// ErrorMessage returns the error message for the API exception.
+func (e *GenericAPIError) ErrorMessage() string { return e.Message }
+
+// ErrorFault returns the fault for the API exception.
+func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault }
+
+func (e *GenericAPIError) Error() string {
+ return fmt.Sprintf("api error %s: %s", e.Code, e.Message)
+}
+
+var _ APIError = (*GenericAPIError)(nil)
+
+// OperationError decorates an underlying error which occurred while invoking
+// an operation with names of the operation and API.
+type OperationError struct {
+ ServiceID string
+ OperationName string
+ Err error
+}
+
+// Service returns the name of the API service the error occurred with.
+func (e *OperationError) Service() string { return e.ServiceID }
+
+// Operation returns the name of the API operation the error occurred with.
+func (e *OperationError) Operation() string { return e.OperationName }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *OperationError) Unwrap() error { return e.Err }
+
+func (e *OperationError) Error() string {
+ return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err)
+}
+
+// DeserializationError provides a wrapper for an error that occurs during
+// deserialization.
+type DeserializationError struct {
+ Err error // original error
+ Snapshot []byte
+}
+
+// Error returns a formatted error for DeserializationError
+func (e *DeserializationError) Error() string {
+ const msg = "deserialization failed"
+ if e.Err == nil {
+ return msg
+ }
+ return fmt.Sprintf("%s, %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in DeserializationError
+func (e *DeserializationError) Unwrap() error { return e.Err }
+
+// ErrorFault provides the type for a Smithy API error fault.
+type ErrorFault int
+
+// ErrorFault enumeration values
+const (
+ FaultUnknown ErrorFault = iota
+ FaultServer
+ FaultClient
+)
+
+func (f ErrorFault) String() string {
+ switch f {
+ case FaultServer:
+ return "server"
+ case FaultClient:
+ return "client"
+ default:
+ return "unknown"
+ }
+}
+
+// SerializationError represents an error that occurred while attempting to serialize a request
+type SerializationError struct {
+ Err error // original error
+}
+
+// Error returns a formatted error for SerializationError
+func (e *SerializationError) Error() string {
+ const msg = "serialization failed"
+ if e.Err == nil {
+ return msg
+ }
+ return fmt.Sprintf("%s: %v", msg, e.Err)
+}
+
+// Unwrap returns the underlying Error in SerializationError
+func (e *SerializationError) Unwrap() error { return e.Err }
+
+// CanceledError is the error that will be returned by an API request that was
+// canceled. API operations given a Context may return this error when
+// canceled.
+type CanceledError struct {
+ Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*CanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
+func (e *CanceledError) Unwrap() error {
+ return e.Err
+}
+
+func (e *CanceledError) Error() string {
+ return fmt.Sprintf("canceled, %v", e.Err)
+}
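
These error types compose with the standard errors package; a hedged sketch of the typical unwrapping flow (the S3-style code and message are made up for illustration):

```go
package main

import (
	"errors"
	"fmt"

	smithy "github.com/aws/smithy-go"
)

func main() {
	// An SDK call typically surfaces failures wrapped in an OperationError.
	var err error = &smithy.OperationError{
		ServiceID:     "S3",
		OperationName: "GetObject",
		Err: &smithy.GenericAPIError{
			Code:    "NoSuchKey",
			Message: "the key does not exist",
			Fault:   smithy.FaultClient,
		},
	}

	// errors.As walks Unwrap to reach the modeled API error.
	var ae smithy.APIError
	if errors.As(err, &ae) {
		fmt.Println(ae.ErrorCode(), ae.ErrorFault()) // NoSuchKey client
	}
}
```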
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
new file mode 100644
index 000000000..263059014
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package smithy
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.23.2"
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
new file mode 100644
index 000000000..fe6a62006
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
new file mode 100644
index 000000000..9c9d02b94
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
@@ -0,0 +1,8 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked because it is part of the unstable and
+// unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+
+package singleflight
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
new file mode 100644
index 000000000..e8a1b17d5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+ value interface{}
+ stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+ return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+ stack := debug.Stack()
+
+ // The first line of the stack trace is of the form "goroutine N [status]:"
+ // but by the time the panic reaches Do the goroutine may no longer exist
+ // and its status will have changed. Trim out the misleading line.
+ if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+ stack = stack[line+1:]
+ }
+ return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val interface{}
+ err error
+
+ // forgotten indicates whether Forget was called with this call's key
+ // while the call was still in flight.
+ forgotten bool
+
+ // These fields are read and written with the singleflight
+ // mutex held before the WaitGroup is done, and are read but
+ // not written after the WaitGroup is done.
+ dups int
+ chans []chan<- Result
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group struct {
+ mu sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.mu.Unlock()
+ c.wg.Wait()
+
+ if e, ok := c.err.(*panicError); ok {
+ panic(e)
+ } else if c.err == errGoexit {
+ runtime.Goexit()
+ }
+ return c.val, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, c.dups > 0
+}
+
+// DoChan is like Do but returns a channel that will receive the
+// results when they are ready.
+//
+// The returned channel will not be closed.
+func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
+ ch := make(chan Result, 1)
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ c.chans = append(c.chans, ch)
+ g.mu.Unlock()
+ return ch
+ }
+ c := &call{chans: []chan<- Result{ch}}
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ go g.doCall(c, key, fn)
+
+ return ch
+}
+
+// doCall handles the single call for a key.
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+ normalReturn := false
+ recovered := false
+
+ // use double-defer to distinguish panic from runtime.Goexit,
+ // more details see https://golang.org/cl/134395
+ defer func() {
+ // the given function invoked runtime.Goexit
+ if !normalReturn && !recovered {
+ c.err = errGoexit
+ }
+
+ c.wg.Done()
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ if !c.forgotten {
+ delete(g.m, key)
+ }
+
+ if e, ok := c.err.(*panicError); ok {
+ // In order to prevent the waiting channels from being blocked forever,
+			// we need to ensure that this panic cannot be recovered.
+ if len(c.chans) > 0 {
+ go panic(e)
+ select {} // Keep this goroutine around so that it will appear in the crash dump.
+ } else {
+ panic(e)
+ }
+ } else if c.err == errGoexit {
+ // Already in the process of goexit, no need to call again
+ } else {
+ // Normal return
+ for _, ch := range c.chans {
+ ch <- Result{c.val, c.err, c.dups > 0}
+ }
+ }
+ }()
+
+ func() {
+ defer func() {
+ if !normalReturn {
+ // Ideally, we would wait to take a stack trace until we've determined
+ // whether this is a panic or a runtime.Goexit.
+ //
+ // Unfortunately, the only way we can distinguish the two is to see
+ // whether the recover stopped the goroutine from terminating, and by
+ // the time we know that, the part of the stack trace relevant to the
+ // panic has been discarded.
+ if r := recover(); r != nil {
+ c.err = newPanicError(r)
+ }
+ }
+ }()
+
+ c.val, c.err = fn()
+ normalReturn = true
+ }()
+
+ if !normalReturn {
+ recovered = true
+ }
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+ g.mu.Lock()
+ if c, ok := g.m[key]; ok {
+ c.forgotten = true
+ }
+ delete(g.m, key)
+ g.mu.Unlock()
+}
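
Since this fork lives under internal/ it cannot be imported by consumers; the upstream golang.org/x/sync/singleflight package it was forked from exposes the identical API, so a behavior sketch against that package applies equally here:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight" // same API as the internal fork above
)

func main() {
	var g singleflight.Group
	var executions atomic.Int64

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Concurrent callers with the same key share a single execution.
			v, _, shared := g.Do("refresh-credentials", func() (interface{}, error) {
				executions.Add(1)
				time.Sleep(10 * time.Millisecond) // simulate an expensive fetch
				return "token", nil
			})
			fmt.Println(v, shared) // token true
		}()
	}
	wg.Wait()
	fmt.Println("executions:", executions.Load()) // typically 1
}
```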
diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go
new file mode 100644
index 000000000..f8417c15b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/byte.go
@@ -0,0 +1,12 @@
+package io
+
+const (
+ // Byte is 8 bits
+ Byte int64 = 1
+ // KibiByte (KiB) is 1024 Bytes
+ KibiByte = Byte * 1024
+ // MebiByte (MiB) is 1024 KiB
+ MebiByte = KibiByte * 1024
+ // GibiByte (GiB) is 1024 MiB
+ GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go
new file mode 100644
index 000000000..a6a33eaf5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/doc.go
@@ -0,0 +1,2 @@
+// Package io provides utilities for Smithy generated API clients.
+package io
diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go
new file mode 100644
index 000000000..07063f296
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/reader.go
@@ -0,0 +1,16 @@
+package io
+
+import (
+ "io"
+)
+
+// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method
+// that does nothing.
+type ReadSeekNopCloser struct {
+ io.ReadSeeker
+}
+
+// Close does nothing.
+func (ReadSeekNopCloser) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
new file mode 100644
index 000000000..06b476add
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
@@ -0,0 +1,94 @@
+package io
+
+import (
+ "bytes"
+ "io"
+)
+
+// RingBuffer satisfies the io.ReadWriter interface.
+//
+// RingBuffer is a revolving buffer data structure, which can be used to store
+// snapshots of data in a revolving window.
+type RingBuffer struct {
+ slice []byte
+ start int
+ end int
+ size int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+ ringBuf := RingBuffer{
+ slice: slice,
+ }
+ return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+ for _, b := range p {
+ // check if end points to invalid index, we need to circle back
+ if r.end == len(r.slice) {
+ r.end = 0
+ }
+ // check if start points to invalid index, we need to circle back
+ if r.start == len(r.slice) {
+ r.start = 0
+ }
+ // if ring buffer is filled, increment the start index
+ if r.size == len(r.slice) {
+ r.size--
+ r.start++
+ }
+
+ r.slice[r.end] = b
+ r.end++
+ r.size++
+ }
+ return len(p), nil
+}
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with any error encountered while reading.
+func (r *RingBuffer) Read(p []byte) (int, error) {
+ // readCount keeps track of the number of bytes read
+ var readCount int
+ for j := 0; j < len(p); j++ {
+ // if ring buffer is empty or completely read
+ // return EOF error.
+ if r.size == 0 {
+ return readCount, io.EOF
+ }
+
+ if r.start == len(r.slice) {
+ r.start = 0
+ }
+
+ p[j] = r.slice[r.start]
+ readCount++
+ // increment the start pointer for ring buffer
+ r.start++
+ // decrement the size of ring buffer
+ r.size--
+ }
+ return readCount, nil
+}
+
+// Len returns the number of unread bytes in the buffer.
+func (r *RingBuffer) Len() int {
+ return r.size
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+ var b bytes.Buffer
+ io.Copy(&b, &r)
+ return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+ *r = RingBuffer{
+ slice: r.slice,
+ }
+}
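
A small sketch of the revolving-window behavior: writes beyond the backing slice's capacity evict the oldest bytes, so the buffer always holds the most recent data.

```go
package main

import (
	"fmt"

	smithyio "github.com/aws/smithy-go/io"
)

func main() {
	// An 8-byte revolving window.
	rb := smithyio.NewRingBuffer(make([]byte, 8))

	// Write 16 bytes through the 8-byte window; earlier bytes are evicted.
	rb.Write([]byte("0123456789abcdef"))

	fmt.Println(rb.Len())          // 8
	fmt.Printf("%s\n", rb.Bytes()) // 89abcdef
}
```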
diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
new file mode 100644
index 000000000..800bf3769
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd)
+
+usage() {
+ echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+ exit 1
+}
+
+while getopts "hs:d:" options; do
+ case "${options}" in
+ s)
+ SMITHY_SOURCE_DIR=${OPTARG}
+ if [ "$SMITHY_SOURCE_DIR" == "" ]; then
+ echo "path to smithy-go source directory is required" || exit
+ usage
+ fi
+ ;;
+ d)
+ PROJECT_DIR=${OPTARG}
+ ;;
+ h)
+ usage
+ ;;
+ *)
+ usage
+ ;;
+ esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+ cd $PROJECT_DIR || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do
+ repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}}
+ echo -replace $x=$repPath
+done | xargs go mod edit
diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go
new file mode 100644
index 000000000..2071924bd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/logging/logger.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+ "context"
+ "io"
+ "log"
+)
+
+// Classification is the type of the log entry's classification name.
+type Classification string
+
+// Set of standard classifications that can be used by clients and middleware
+const (
+ Warn Classification = "WARN"
+ Debug Classification = "DEBUG"
+)
+
+// Logger is an interface for logging entries at certain classifications.
+type Logger interface {
+ // Logf is expected to support the standard fmt package "verbs".
+ Logf(classification Classification, format string, v ...interface{})
+}
+
+// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
+type LoggerFunc func(classification Classification, format string, v ...interface{})
+
+// Logf delegates the logging request to the wrapped function.
+func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
+ f(classification, format, v...)
+}
+
+// ContextLogger is an optional interface a Logger implementation may expose that provides
+// the ability to create context aware log entries.
+type ContextLogger interface {
+ WithContext(context.Context) Logger
+}
+
+// WithContext passes the provided context to the logger if it implements the
+// ContextLogger interface and returns the resulting logger; otherwise the logger
+// is returned as is. As a special case, a nil logger yields a Nop logger.
+func WithContext(ctx context.Context, logger Logger) Logger {
+ if logger == nil {
+ return Nop{}
+ }
+
+ cl, ok := logger.(ContextLogger)
+ if !ok {
+ return logger
+ }
+
+ return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+ return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library
+// logger and delegates logging to its Printf method.
+type StandardLogger struct {
+ Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+ if len(classification) != 0 {
+ format = string(classification) + " " + format
+ }
+
+ s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+ return &StandardLogger{
+ Logger: log.New(writer, "SDK ", log.LstdFlags),
+ }
+}
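
A usage sketch for the two Logger implementations above:

```go
package main

import (
	"os"

	"github.com/aws/smithy-go/logging"
)

func main() {
	// Standard-library-backed logger; entries look like:
	//   SDK 2024/01/01 12:00:00 WARN retrying request, attempt 2
	logger := logging.NewStandardLogger(os.Stderr)
	logger.Logf(logging.Warn, "retrying request, attempt %d", 2)

	// A LoggerFunc adapts any function, e.g. to filter debug noise.
	filtered := logging.LoggerFunc(func(c logging.Classification, format string, v ...interface{}) {
		if c == logging.Debug {
			return // drop debug entries
		}
		logger.Logf(c, format, v...)
	})
	filtered.Logf(logging.Debug, "this entry is dropped")
}
```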
diff --git a/vendor/github.com/aws/smithy-go/metrics/metrics.go b/vendor/github.com/aws/smithy-go/metrics/metrics.go
new file mode 100644
index 000000000..c009d9f27
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/metrics/metrics.go
@@ -0,0 +1,136 @@
+// Package metrics defines the metrics APIs used by Smithy clients.
+package metrics
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go"
+)
+
+// MeterProvider is the entry point for creating a Meter.
+type MeterProvider interface {
+ Meter(scope string, opts ...MeterOption) Meter
+}
+
+// MeterOption applies configuration to a Meter.
+type MeterOption func(o *MeterOptions)
+
+// MeterOptions represents configuration for a Meter.
+type MeterOptions struct {
+ Properties smithy.Properties
+}
+
+// Meter is the entry point for creation of measurement instruments.
+type Meter interface {
+ // integer/synchronous
+ Int64Counter(name string, opts ...InstrumentOption) (Int64Counter, error)
+ Int64UpDownCounter(name string, opts ...InstrumentOption) (Int64UpDownCounter, error)
+ Int64Gauge(name string, opts ...InstrumentOption) (Int64Gauge, error)
+ Int64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error)
+
+ // integer/asynchronous
+ Int64AsyncCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Int64AsyncUpDownCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Int64AsyncGauge(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+
+ // floating-point/synchronous
+ Float64Counter(name string, opts ...InstrumentOption) (Float64Counter, error)
+ Float64UpDownCounter(name string, opts ...InstrumentOption) (Float64UpDownCounter, error)
+ Float64Gauge(name string, opts ...InstrumentOption) (Float64Gauge, error)
+ Float64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error)
+
+ // floating-point/asynchronous
+ Float64AsyncCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Float64AsyncUpDownCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Float64AsyncGauge(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+}
+
+// InstrumentOption applies configuration to an instrument.
+type InstrumentOption func(o *InstrumentOptions)
+
+// InstrumentOptions represents configuration for an instrument.
+type InstrumentOptions struct {
+ UnitLabel string
+ Description string
+}
+
+// Int64Counter measures a monotonically increasing int64 value.
+type Int64Counter interface {
+ Add(context.Context, int64, ...RecordMetricOption)
+}
+
+// Int64UpDownCounter measures a fluctuating int64 value.
+type Int64UpDownCounter interface {
+ Add(context.Context, int64, ...RecordMetricOption)
+}
+
+// Int64Gauge samples a discrete int64 value.
+type Int64Gauge interface {
+ Sample(context.Context, int64, ...RecordMetricOption)
+}
+
+// Int64Histogram records multiple data points for an int64 value.
+type Int64Histogram interface {
+ Record(context.Context, int64, ...RecordMetricOption)
+}
+
+// Float64Counter measures a monotonically increasing float64 value.
+type Float64Counter interface {
+ Add(context.Context, float64, ...RecordMetricOption)
+}
+
+// Float64UpDownCounter measures a fluctuating float64 value.
+type Float64UpDownCounter interface {
+ Add(context.Context, float64, ...RecordMetricOption)
+}
+
+// Float64Gauge samples a discrete float64 value.
+type Float64Gauge interface {
+ Sample(context.Context, float64, ...RecordMetricOption)
+}
+
+// Float64Histogram records multiple data points for a float64 value.
+type Float64Histogram interface {
+ Record(context.Context, float64, ...RecordMetricOption)
+}
+
+// AsyncInstrument is the universal handle returned for creation of all async
+// instruments.
+//
+// Callers use the Stop() API to unregister the callback passed at instrument
+// creation.
+type AsyncInstrument interface {
+ Stop()
+}
+
+// Int64Callback describes a function invoked when an async int64 instrument is
+// read.
+type Int64Callback func(context.Context, Int64Observer)
+
+// Int64Observer is the interface passed to async int64 instruments.
+//
+// Callers use the Observe() API of this interface to report metrics to the
+// underlying collector.
+type Int64Observer interface {
+ Observe(context.Context, int64, ...RecordMetricOption)
+}
+
+// Float64Callback describes a function invoked when an async float64
+// instrument is read.
+type Float64Callback func(context.Context, Float64Observer)
+
+// Float64Observer is the interface passed to async float64 instruments.
+//
+// Callers use the Observe() API of this interface to report metrics to the
+// underlying collector.
+type Float64Observer interface {
+ Observe(context.Context, float64, ...RecordMetricOption)
+}
+
+// RecordMetricOption applies configuration to a recorded metric.
+type RecordMetricOption func(o *RecordMetricOptions)
+
+// RecordMetricOptions represents configuration for a recorded metric.
+type RecordMetricOptions struct {
+ Properties smithy.Properties
+}
diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go
new file mode 100644
index 000000000..444126df5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/metrics/nop.go
@@ -0,0 +1,98 @@
+package metrics
+
+import "context"
+
+// NopMeterProvider is a no-op metrics implementation.
+type NopMeterProvider struct{}
+
+var _ MeterProvider = (*NopMeterProvider)(nil)
+
+// Meter returns a meter which creates no-op instruments.
+func (NopMeterProvider) Meter(string, ...MeterOption) Meter {
+ return NopMeter{}
+}
+
+// NopMeter creates no-op instruments.
+type NopMeter struct{}
+
+var _ Meter = (*NopMeter)(nil)
+
+// Int64Counter creates a no-op instrument.
+func (NopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Int64UpDownCounter creates a no-op instrument.
+func (NopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Int64Gauge creates a no-op instrument.
+func (NopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Int64Histogram creates a no-op instrument.
+func (NopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Int64AsyncCounter creates a no-op instrument.
+func (NopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Int64AsyncUpDownCounter creates a no-op instrument.
+func (NopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Int64AsyncGauge creates a no-op instrument.
+func (NopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentInt64, nil
+}
+
+// Float64Counter creates a no-op instrument.
+func (NopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) {
+ return nopInstrumentFloat64, nil
+}
+
+// Float64UpDownCounter creates a no-op instrument.
+func (NopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) {
+ return nopInstrumentFloat64, nil
+}
+
+// Float64Gauge creates a no-op instrument.
+func (NopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) {
+ return nopInstrumentFloat64, nil
+}
+
+// Float64Histogram creates a no-op instrument.
+func (NopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) {
+ return nopInstrumentFloat64, nil
+}
+
+// Float64AsyncCounter creates a no-op instrument.
+func (NopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentFloat64, nil
+}
+
+// Float64AsyncUpDownCounter creates a no-op instrument.
+func (NopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentFloat64, nil
+}
+
+// Float64AsyncGauge creates a no-op instrument.
+func (NopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrumentFloat64, nil
+}
+
+type nopInstrument[N any] struct{}
+
+func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {}
+func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {}
+func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {}
+func (nopInstrument[_]) Stop() {}
+
+var nopInstrumentInt64 = nopInstrument[int64]{}
+var nopInstrumentFloat64 = nopInstrument[float64]{}
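
The no-op provider lets client code depend on MeterProvider unconditionally; a minimal sketch (recordAttempt is a hypothetical helper, not part of this package):

```go
package main

import (
	"context"

	"github.com/aws/smithy-go/metrics"
)

// recordAttempt shows how code can take any MeterProvider without caring
// whether metrics are actually enabled.
func recordAttempt(mp metrics.MeterProvider) error {
	meter := mp.Meter("example.scope")
	counter, err := meter.Int64Counter("request.attempts")
	if err != nil {
		return err
	}
	counter.Add(context.Background(), 1)
	return nil
}

func main() {
	// With the no-op provider every instrument is safe to call and does nothing.
	_ = recordAttempt(metrics.NopMeterProvider{})
}
```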
diff --git a/vendor/github.com/aws/smithy-go/middleware/context.go b/vendor/github.com/aws/smithy-go/middleware/context.go
new file mode 100644
index 000000000..f51aa4f04
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/context.go
@@ -0,0 +1,41 @@
+package middleware
+
+import "context"
+
+type (
+ serviceIDKey struct{}
+ operationNameKey struct{}
+)
+
+// WithServiceID adds a service ID to the context, scoped to middleware stack
+// values.
+//
+// This API is called in the client runtime when bootstrapping an operation and
+// should not typically be used directly.
+func WithServiceID(parent context.Context, id string) context.Context {
+ return WithStackValue(parent, serviceIDKey{}, id)
+}
+
+// GetServiceID retrieves the service ID from the context. This is typically
+// the service shape's name from its Smithy model. Service clients for specific
+// systems (e.g. AWS SDK) may use an alternate designated value.
+func GetServiceID(ctx context.Context) string {
+ id, _ := GetStackValue(ctx, serviceIDKey{}).(string)
+ return id
+}
+
+// WithOperationName adds the operation name to the context, scoped to
+// middleware stack values.
+//
+// This API is called in the client runtime when bootstrapping an operation and
+// should not typically be used directly.
+func WithOperationName(parent context.Context, id string) context.Context {
+ return WithStackValue(parent, operationNameKey{}, id)
+}
+
+// GetOperationName retrieves the operation name from the context. This is
+// typically the operation shape's name from its Smithy model.
+func GetOperationName(ctx context.Context) string {
+ name, _ := GetStackValue(ctx, operationNameKey{}).(string)
+ return name
+}
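
An illustrative sketch; in practice the client runtime seeds these values when bootstrapping an operation, but they can be exercised directly:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

func main() {
	// Middleware can later read these for logging or metrics.
	ctx := middleware.WithServiceID(context.Background(), "Lambda")
	ctx = middleware.WithOperationName(ctx, "Invoke")

	fmt.Println(middleware.GetServiceID(ctx))     // Lambda
	fmt.Println(middleware.GetOperationName(ctx)) // Invoke
}
```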
diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go
new file mode 100644
index 000000000..9858928a7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/doc.go
@@ -0,0 +1,67 @@
+// Package middleware provides transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into an
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate an underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only altered to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+//
+// * Deserialize: Reacts to the handler's response returned by the recipient of
+// the request message. Deserializes the response into a structured type or
+// error above stacks can react to.
+//
+// Adding Middleware to a Stack Step
+//
+// Middleware can be added to the front or back of a step, or inserted relative,
+// by name, to an existing middleware in that step. If a middleware does not
+// have a name, a unique name will be generated when it is added to the step.
+//
+// // Create middleware stack
+// stack := middleware.NewStack()
+//
+// // Add middleware to stack steps
+// stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+// stack.Serialize.Add(marshalOperationFoo, middleware.After)
+// stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+//
+// // Invoke middleware on handler.
+// resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+package middleware
diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go
new file mode 100644
index 000000000..c2f0dbb6b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/logging.go
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/logging"
+)
+
+// loggerKey is the context value key for which the logger is associated with.
+type loggerKey struct{}
+
+// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger
+// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed
+// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is.
+func GetLogger(ctx context.Context) logging.Logger {
+ logger, ok := ctx.Value(loggerKey{}).(logging.Logger)
+ if !ok || logger == nil {
+ return logging.Nop{}
+ }
+
+ return logging.WithContext(ctx, logger)
+}
+
+// SetLogger sets the provided logger value on the provided ctx.
+func SetLogger(ctx context.Context, logger logging.Logger) context.Context {
+ return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+type setLogger struct {
+ Logger logging.Logger
+}
+
+// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context.
+func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error {
+ return stack.Initialize.Add(&setLogger{Logger: logger}, After)
+}
+
+func (a *setLogger) ID() string {
+ return "SetLogger"
+}
+
+func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ return next.HandleInitialize(SetLogger(ctx, a.Logger), in)
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go
new file mode 100644
index 000000000..7bb7dbcf5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go
@@ -0,0 +1,65 @@
+package middleware
+
+// MetadataReader provides an interface for reading metadata from the
+// underlying metadata container.
+type MetadataReader interface {
+ Get(key interface{}) interface{}
+}
+
+// Metadata provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and set will panic if key is not a comparable
+// value type.
+//
+// Metadata uses lazy initialization, and the Set method must be called on an
+// addressable value, or through a pointer. Not doing so may cause the key/value
+// pair to not be set.
+type Metadata struct {
+ values map[interface{}]interface{}
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m Metadata) Get(key interface{}) interface{} {
+ return m.values[key]
+}
+
+// Clone creates a shallow copy of Metadata entries, returning a new Metadata
+// value with the original entries copied into it.
+func (m Metadata) Clone() Metadata {
+ vs := make(map[interface{}]interface{}, len(m.values))
+ for k, v := range m.values {
+ vs[k] = v
+ }
+
+ return Metadata{
+ values: vs,
+ }
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Set method must be called as an addressable value, or pointer. If Set is not
+// called as an addressable value or pointer, the key value pair being set may
+// be lost.
+//
+// Panics if the key type is not comparable.
+func (m *Metadata) Set(key, value interface{}) {
+ if m.values == nil {
+ m.values = map[interface{}]interface{}{}
+ }
+ m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m Metadata) Has(key interface{}) bool {
+ if m.values == nil {
+ return false
+ }
+ _, ok := m.values[key]
+ return ok
+}
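
A sketch of the addressability caveat documented above (requestIDKey is a hypothetical key type; unexported key types keep packages from colliding):

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

type requestIDKey struct{}

func main() {
	var md middleware.Metadata

	// md is an addressable local, so Set retains the lazily initialized map.
	// Calling Set on a non-addressable copy would silently drop the entry.
	md.Set(requestIDKey{}, "req-1234")

	if md.Has(requestIDKey{}) {
		fmt.Println(md.Get(requestIDKey{})) // req-1234
	}
}
```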
diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go
new file mode 100644
index 000000000..803b7c751
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+ "context"
+)
+
+// Handler provides the interface for performing the logic to obtain an output,
+// or error for the given input.
+type Handler interface {
+ // Handle performs logic to obtain an output for the given input. Handler
+ // should be decorated with middleware to perform input specific behavior.
+ Handle(ctx context.Context, input interface{}) (
+ output interface{}, metadata Metadata, err error,
+ )
+}
+
+// HandlerFunc provides a wrapper around a function pointer to be used as a
+// middleware handler.
+type HandlerFunc func(ctx context.Context, input interface{}) (
+ output interface{}, metadata Metadata, err error,
+)
+
+// Handle invokes the underlying function, returning the result.
+func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
+ output interface{}, metadata Metadata, err error,
+) {
+ return fn(ctx, input)
+}
+
+// Middleware provides the interface to call handlers in a chain.
+type Middleware interface {
+ // ID provides a unique identifier for the middleware.
+ ID() string
+
+	// HandleMiddleware performs the middleware's handling of the input,
+	// returning the output or error. The middleware can invoke the next
+	// Handler if handling should continue.
+ HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+ output interface{}, metadata Metadata, err error,
+ )
+}
+
+// decoratedHandler wraps a middleware in order to call the next handler in
+// the chain.
+type decoratedHandler struct {
+ // The next handler to be called.
+ Next Handler
+
+ // The current middleware decorating the handler.
+ With Middleware
+}
+
+// Handle implements the Handler interface to handle an operation invocation.
+func (m decoratedHandler) Handle(ctx context.Context, input interface{}) (
+ output interface{}, metadata Metadata, err error,
+) {
+ return m.With.HandleMiddleware(ctx, input, m.Next)
+}
+
+// DecorateHandler decorates a handler with the given middleware, wrapping the
+// handler so that the first middleware in the list is the outermost decorator.
+func DecorateHandler(h Handler, with ...Middleware) Handler {
+ for i := len(with) - 1; i >= 0; i-- {
+ h = decoratedHandler{
+ Next: h,
+ With: with[i],
+ }
+ }
+
+ return h
+}
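
A minimal sketch of handler decoration (the exclaim middleware is a toy example):

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// exclaim decorates the input on its way down the chain.
type exclaim struct{}

func (exclaim) ID() string { return "Exclaim" }

func (exclaim) HandleMiddleware(ctx context.Context, input interface{}, next middleware.Handler) (
	interface{}, middleware.Metadata, error,
) {
	return next.Handle(ctx, fmt.Sprintf("%v!", input))
}

func main() {
	// The innermost handler; in a real client this is the transport round trip.
	handler := middleware.HandlerFunc(func(ctx context.Context, input interface{}) (
		interface{}, middleware.Metadata, error,
	) {
		return fmt.Sprintf("handled %v", input), middleware.Metadata{}, nil
	})

	h := middleware.DecorateHandler(handler, exclaim{})
	out, _, _ := h.Handle(context.Background(), "ping")
	fmt.Println(out) // handled ping!
}
```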
diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
new file mode 100644
index 000000000..daf90136e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go
@@ -0,0 +1,270 @@
+package middleware
+
+import "fmt"
+
+// RelativePosition specifies the relative position of a middleware
+// in an ordered group.
+type RelativePosition int
+
+// Relative position for middleware in steps.
+const (
+ After RelativePosition = iota
+ Before
+)
+
+type ider interface {
+ ID() string
+}
+
+// orderedIDs provides an ordered collection of items with relative ordering
+// by name.
+type orderedIDs struct {
+ order *relativeOrder
+ items map[string]ider
+}
+
+// selected based on the general upper bound of # of middlewares in each step
+// in the downstream aws-sdk-go-v2
+const baseOrderedItems = 8
+
+func newOrderedIDs(cap int) *orderedIDs {
+ return &orderedIDs{
+ order: newRelativeOrder(cap),
+ items: make(map[string]ider, cap),
+ }
+}
+
+// Add injects the item to the relative position of the item group. Returns an
+// error if the item already exists.
+func (g *orderedIDs) Add(m ider, pos RelativePosition) error {
+ id := m.ID()
+ if len(id) == 0 {
+ return fmt.Errorf("empty ID, ID must not be empty")
+ }
+
+ if err := g.order.Add(pos, id); err != nil {
+ return err
+ }
+
+ g.items[id] = m
+ return nil
+}
+
+// Insert injects the item relative to an existing item id. Returns an error if
+// the original item does not exist, or the item being added already exists.
+func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error {
+ if len(m.ID()) == 0 {
+ return fmt.Errorf("insert ID must not be empty")
+ }
+ if len(relativeTo) == 0 {
+ return fmt.Errorf("relative to ID must not be empty")
+ }
+
+ if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil {
+ return err
+ }
+
+ g.items[m.ID()] = m
+ return nil
+}
+
+// Get returns the ider identified by id. If ider is not present, returns false.
+func (g *orderedIDs) Get(id string) (ider, bool) {
+ v, ok := g.items[id]
+ return v, ok
+}
+
+// Swap removes the item by id, replacing it with the new item. Returns an error
+// if the original item doesn't exist.
+func (g *orderedIDs) Swap(id string, m ider) (ider, error) {
+ if len(id) == 0 {
+ return nil, fmt.Errorf("swap from ID must not be empty")
+ }
+
+ iderID := m.ID()
+ if len(iderID) == 0 {
+ return nil, fmt.Errorf("swap to ID must not be empty")
+ }
+
+ if err := g.order.Swap(id, iderID); err != nil {
+ return nil, err
+ }
+
+ removed := g.items[id]
+
+ delete(g.items, id)
+ g.items[iderID] = m
+
+ return removed, nil
+}
+
+// Remove removes the item by id. Returns an error if the item
+// doesn't exist.
+func (g *orderedIDs) Remove(id string) (ider, error) {
+ if len(id) == 0 {
+ return nil, fmt.Errorf("remove ID must not be empty")
+ }
+
+ if err := g.order.Remove(id); err != nil {
+ return nil, err
+ }
+
+ removed := g.items[id]
+ delete(g.items, id)
+ return removed, nil
+}
+
+func (g *orderedIDs) List() []string {
+ items := g.order.List()
+ order := make([]string, len(items))
+ copy(order, items)
+ return order
+}
+
+// Clear removes all entries and slots.
+func (g *orderedIDs) Clear() {
+ g.order.Clear()
+ g.items = map[string]ider{}
+}
+
+// GetOrder returns the items in the order they should be invoked in.
+func (g *orderedIDs) GetOrder() []interface{} {
+ order := g.order.List()
+ ordered := make([]interface{}, len(order))
+ for i := 0; i < len(order); i++ {
+ ordered[i] = g.items[order[i]]
+ }
+
+ return ordered
+}
+
+// relativeOrder provides ordering of items.
+type relativeOrder struct {
+ order []string
+}
+
+func newRelativeOrder(cap int) *relativeOrder {
+ return &relativeOrder{
+ order: make([]string, 0, cap),
+ }
+}
+
+// Add inserts an item into the order relative to the position provided.
+func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error {
+ if len(ids) == 0 {
+ return nil
+ }
+
+ for _, id := range ids {
+ if _, ok := s.has(id); ok {
+ return fmt.Errorf("already exists, %v", id)
+ }
+ }
+
+ switch pos {
+ case Before:
+ return s.insert(0, Before, ids...)
+
+ case After:
+ s.order = append(s.order, ids...)
+
+ default:
+ return fmt.Errorf("invalid position, %v", int(pos))
+ }
+
+ return nil
+}
+
+// Insert injects an item before or after the relative item. Returns
+// an error if the relative item does not exist.
+func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error {
+ if len(ids) == 0 {
+ return nil
+ }
+
+ for _, id := range ids {
+ if _, ok := s.has(id); ok {
+ return fmt.Errorf("already exists, %v", id)
+ }
+ }
+
+ i, ok := s.has(relativeTo)
+ if !ok {
+ return fmt.Errorf("not found, %v", relativeTo)
+ }
+
+ return s.insert(i, pos, ids...)
+}
+
+// Swap replaces the item identified by id with the to item. Returns an
+// error if the original item id does not exist. Allows swapping out an
+// item for another item with the same id.
+func (s *relativeOrder) Swap(id, to string) error {
+ i, ok := s.has(id)
+ if !ok {
+ return fmt.Errorf("not found, %v", id)
+ }
+
+ if _, ok = s.has(to); ok && id != to {
+ return fmt.Errorf("already exists, %v", to)
+ }
+
+ s.order[i] = to
+ return nil
+}
+
+func (s *relativeOrder) Remove(id string) error {
+ i, ok := s.has(id)
+ if !ok {
+ return fmt.Errorf("not found, %v", id)
+ }
+
+ s.order = append(s.order[:i], s.order[i+1:]...)
+ return nil
+}
+
+func (s *relativeOrder) List() []string {
+ return s.order
+}
+
+func (s *relativeOrder) Clear() {
+ s.order = s.order[0:0]
+}
+
+func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error {
+ switch pos {
+ case Before:
+ n := len(ids)
+ var src []string
+ if n <= cap(s.order)-len(s.order) {
+ s.order = s.order[:len(s.order)+n]
+ src = s.order
+ } else {
+ src = s.order
+ s.order = make([]string, len(s.order)+n)
+ copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half
+ }
+ copy(s.order[i+n:], src[i:])
+ copy(s.order[i:], ids)
+ case After:
+ if i == len(s.order)-1 || len(s.order) == 0 {
+ s.order = append(s.order, ids...)
+ } else {
+ s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...)
+ }
+
+ default:
+ return fmt.Errorf("invalid position, %v", int(pos))
+ }
+
+ return nil
+}
+
+func (s *relativeOrder) has(id string) (i int, found bool) {
+ for i := 0; i < len(s.order); i++ {
+ if s.order[i] == id {
+ return i, true
+ }
+ }
+ return 0, false
+}
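
The orderedIDs/relativeOrder types are unexported; their behavior is visible through the exported step types, e.g. the BuildStep added later in this diff. A hedged sketch, assuming the step's usual Add/Insert/List methods:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// passthrough returns a named BuildMiddleware that simply delegates.
func passthrough(id string) middleware.BuildMiddleware {
	return middleware.BuildMiddlewareFunc(id, func(
		ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
	) (middleware.BuildOutput, middleware.Metadata, error) {
		return next.HandleBuild(ctx, in)
	})
}

func main() {
	step := middleware.NewBuildStep()

	// Add places middleware at the front (Before) or back (After) of the step;
	// Insert places middleware relative to an existing ID.
	_ = step.Add(passthrough("A"), middleware.After)
	_ = step.Add(passthrough("C"), middleware.After)
	_ = step.Insert(passthrough("B"), "A", middleware.After)

	fmt.Println(step.List()) // [A B C]
}
```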
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go
new file mode 100644
index 000000000..45ccb5b93
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack.go
@@ -0,0 +1,209 @@
+package middleware
+
+import (
+ "context"
+ "io"
+ "strings"
+)
+
+// Stack provides protocol and transport agnostic set of middleware split into
+// distinct steps. Steps have specific transitions between them, that are
+// managed by the individual step.
+//
+// Steps are composed as middleware around the underlying handler in the
+// following order:
+//
+// Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
+//
+// Any middleware within the chain may choose to stop and return an error or
+// response. Since the middleware decorate the handler like a call stack, each
+// middleware will receive the result of the next middleware in the chain.
+// Middleware that does not need to react to an input, or result must forward
+// along the input down the chain, or return the result back up the chain.
+//
+// Initialize <- Serialize <- Build <- Finalize <- Deserialize <- Handler
+type Stack struct {
+ // Initialize prepares the input, and sets any default parameters as
+ // needed, (e.g. idempotency token, and presigned URLs).
+ //
+ // Takes Input Parameters, and returns result or error.
+ //
+ // Receives result or error from Serialize step.
+ Initialize *InitializeStep
+
+ // Serialize serializes the prepared input into a data structure that can be consumed
+ // by the target transport's message, (e.g. REST-JSON serialization)
+ //
+ // Converts Input Parameters into a Request, and returns the result or error.
+ //
+ // Receives result or error from Build step.
+ Serialize *SerializeStep
+
+ // Build adds additional metadata to the serialized transport message
+ // (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+ // modifications to the message should be copied to all message attempts.
+ //
+ // Takes Request, and returns result or error.
+ //
+ // Receives result or error from Finalize step.
+ Build *BuildStep
+
+ // Finalize performs final preparations needed before sending the message. The
+ // message should already be complete by this stage, and is only alternated
+	// message should already be complete by this stage, and is only altered
+ // request signing)
+ //
+ // Takes Request, and returns result or error.
+ //
+ // Receives result or error from Deserialize step.
+ Finalize *FinalizeStep
+
+ // Deserialize reacts to the handler's response returned by the recipient of the request
+ // message. Deserializes the response into a structured type or error above
+ // stacks can react to.
+ //
+ // Should only forward Request to underlying handler.
+ //
+ // Takes Request, and returns result or error.
+ //
+ // Receives raw response, or error from underlying handler.
+ Deserialize *DeserializeStep
+
+ id string
+}
+
+// NewStack returns an initialized, empty stack.
+func NewStack(id string, newRequestFn func() interface{}) *Stack {
+ return &Stack{
+ id: id,
+ Initialize: NewInitializeStep(),
+ Serialize: NewSerializeStep(newRequestFn),
+ Build: NewBuildStep(),
+ Finalize: NewFinalizeStep(),
+ Deserialize: NewDeserializeStep(),
+ }
+}
+
+// ID returns the unique ID for the stack as a middleware.
+func (s *Stack) ID() string { return s.id }
+
+// HandleMiddleware invokes the middleware stack decorating the next handler.
+// Each step of the stack is invoked in order before calling the next step,
+// with the next handler called last.
+//
+// The input value must be the input parameters of the operation being
+// performed.
+//
+// Will return the result of the operation, or error.
+func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+ output interface{}, metadata Metadata, err error,
+) {
+ h := DecorateHandler(next,
+ s.Initialize,
+ s.Serialize,
+ s.Build,
+ s.Finalize,
+ s.Deserialize,
+ )
+
+ return h.Handle(ctx, input)
+}
+
+// List returns a list of all middleware in the stack by step.
+func (s *Stack) List() []string {
+ var l []string
+ l = append(l, s.id)
+
+ l = append(l, s.Initialize.ID())
+ l = append(l, s.Initialize.List()...)
+
+ l = append(l, s.Serialize.ID())
+ l = append(l, s.Serialize.List()...)
+
+ l = append(l, s.Build.ID())
+ l = append(l, s.Build.List()...)
+
+ l = append(l, s.Finalize.ID())
+ l = append(l, s.Finalize.List()...)
+
+ l = append(l, s.Deserialize.ID())
+ l = append(l, s.Deserialize.List()...)
+
+ return l
+}
+
+func (s *Stack) String() string {
+ var b strings.Builder
+
+ w := &indentWriter{w: &b}
+
+ w.WriteLine(s.id)
+ w.Push()
+
+ writeStepItems(w, s.Initialize)
+ writeStepItems(w, s.Serialize)
+ writeStepItems(w, s.Build)
+ writeStepItems(w, s.Finalize)
+ writeStepItems(w, s.Deserialize)
+
+ return b.String()
+}
+
+type stackStepper interface {
+ ID() string
+ List() []string
+}
+
+func writeStepItems(w *indentWriter, s stackStepper) {
+ type lister interface {
+ List() []string
+ }
+
+ w.WriteLine(s.ID())
+ w.Push()
+
+ defer w.Pop()
+
+ // ignore stack to prevent circular iterations
+ if _, ok := s.(*Stack); ok {
+ return
+ }
+
+ for _, id := range s.List() {
+ w.WriteLine(id)
+ }
+}
+
+type stringWriter interface {
+ io.Writer
+ WriteString(string) (int, error)
+ WriteRune(rune) (int, error)
+}
+
+type indentWriter struct {
+ w stringWriter
+ depth int
+}
+
+const indentDepth = "\t\t\t\t\t\t\t\t\t\t"
+
+func (w *indentWriter) Push() {
+ w.depth++
+}
+
+func (w *indentWriter) Pop() {
+ w.depth--
+ if w.depth < 0 {
+ w.depth = 0
+ }
+}
+
+func (w *indentWriter) WriteLine(v string) {
+ w.w.WriteString(indentDepth[:w.depth])
+
+ v = strings.ReplaceAll(v, "\n", "\\n")
+ v = strings.ReplaceAll(v, "\r", "\\r")
+
+ w.w.WriteString(v)
+ w.w.WriteRune('\n')
+}
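
An end-to-end sketch of a stack invocation. InitializeMiddlewareFunc and DeserializeMiddlewareFunc are the per-step analogues of BuildMiddlewareFunc defined below, assumed to follow the same pattern:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

func main() {
	// newRequestFn builds the empty transport request that the Serialize step
	// hands to serialize middleware; a map stands in for an HTTP request here.
	stack := middleware.NewStack("example", func() interface{} {
		return map[string]string{}
	})

	stack.Initialize.Add(middleware.InitializeMiddlewareFunc("Validate",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
			middleware.InitializeOutput, middleware.Metadata, error,
		) {
			if in.Parameters == nil {
				return middleware.InitializeOutput{}, middleware.Metadata{}, fmt.Errorf("missing parameters")
			}
			return next.HandleInitialize(ctx, in)
		}), middleware.After)

	// Without a deserialize middleware the stack surfaces a nil result, so map
	// the raw response into the operation result.
	stack.Deserialize.Add(middleware.DeserializeMiddlewareFunc("Unmarshal",
		func(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
			middleware.DeserializeOutput, middleware.Metadata, error,
		) {
			out, md, err := next.HandleDeserialize(ctx, in)
			if err == nil {
				out.Result = out.RawResponse // a real client decodes the response here
			}
			return out, md, err
		}), middleware.After)

	// The terminal handler plays the role of the transport client.
	handler := middleware.HandlerFunc(func(ctx context.Context, input interface{}) (
		interface{}, middleware.Metadata, error,
	) {
		return "ok", middleware.Metadata{}, nil
	})

	out, _, err := stack.HandleMiddleware(context.Background(), "params", handler)
	fmt.Println(out, err) // ok <nil>
}
```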
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
new file mode 100644
index 000000000..ef96009ba
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
@@ -0,0 +1,100 @@
+package middleware
+
+import (
+ "context"
+ "reflect"
+ "strings"
+)
+
+// WithStackValue adds a key value pair to the context that is intended to be
+// scoped to a stack. Use ClearStackValues to get a new context with all stack
+// values cleared.
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
+ md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+
+ md = withStackValue(md, key, value)
+ return context.WithValue(ctx, stackValuesKey{}, md)
+}
+
+// ClearStackValues returns a context without any stack values.
+func ClearStackValues(ctx context.Context) context.Context {
+ return context.WithValue(ctx, stackValuesKey{}, nil)
+}
+
+// GetStackValue returns the value pointed to by the key within the stack
+// values, if it is present.
+func GetStackValue(ctx context.Context, key interface{}) interface{} {
+ md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+ if md == nil {
+ return nil
+ }
+
+ return md.Value(key)
+}
+
+type stackValuesKey struct{}
+
+type stackValues struct {
+ key interface{}
+ value interface{}
+ parent *stackValues
+}
+
+func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
+ if key == nil {
+ panic("nil key")
+ }
+ if !reflect.TypeOf(key).Comparable() {
+ panic("key is not comparable")
+ }
+ return &stackValues{key: key, value: value, parent: parent}
+}
+
+func (m *stackValues) Value(key interface{}) interface{} {
+ if key == m.key {
+ return m.value
+ }
+
+ if m.parent == nil {
+ return nil
+ }
+
+ return m.parent.Value(key)
+}
+
+func (c *stackValues) String() string {
+	var str strings.Builder
+
+	cc := c
+	for cc != nil {
+		str.WriteString("(" +
+			reflect.TypeOf(cc.key).String() +
+			": " +
+			stringify(cc.value) +
+			")")
+		if cc.parent != nil {
+			str.WriteString(" -> ")
+		}
+		cc = cc.parent
+	}
+
+	return str.String()
+}
+
+type stringer interface {
+ String() string
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want this package depending on the unicode tables. This is only used by
+// (*stackValues).String().
+func stringify(v interface{}) string {
+ switch s := v.(type) {
+ case stringer:
+ return s.String()
+ case string:
+ return s
+ }
+ return ""
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
new file mode 100644
index 000000000..cc7fe89c9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+ "context"
+)
+
+// BuildInput provides the input parameters for the BuildMiddleware to consume.
+// BuildMiddleware may modify the Request value before forwarding the input
+// along to the next BuildHandler.
+type BuildInput struct {
+ Request interface{}
+}
+
+// BuildOutput provides the result returned by the next BuildHandler.
+type BuildOutput struct {
+ Result interface{}
+}
+
+// BuildHandler provides the interface for the next handler the
+// BuildMiddleware will call in the middleware chain.
+type BuildHandler interface {
+ HandleBuild(ctx context.Context, in BuildInput) (
+ out BuildOutput, metadata Metadata, err error,
+ )
+}
+
+// BuildMiddleware provides the interface for middleware specific to the
+// build step. Delegates to the next BuildHandler for further
+// processing.
+type BuildMiddleware interface {
+	// ID returns a unique ID for the middleware in the BuildStep. The step does
+	// not allow duplicate IDs.
+	ID() string
+
+	// HandleBuild invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+ out BuildOutput, metadata Metadata, err error,
+ )
+}
+
+// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided,
+// and the func to be invoked.
+func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware {
+ return buildMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
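+
+// A minimal sketch of registering a function-backed build middleware on a
+// stack (the "AddUserAgent" ID and the mutation performed are hypothetical):
+//
+//	err := stack.Build.Add(middleware.BuildMiddlewareFunc("AddUserAgent",
+//		func(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+//			middleware.BuildOutput, middleware.Metadata, error,
+//		) {
+//			// inspect or mutate in.Request here, then delegate
+//			return next.HandleBuild(ctx, in)
+//		},
+//	), middleware.After)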
+
+type buildMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)
+}
+
+// ID returns the unique ID for the middleware.
+func (s buildMiddlewareFunc) ID() string { return s.id }
+
+// HandleBuild invokes the middleware Fn.
+func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) (
+ out BuildOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ BuildMiddleware = (buildMiddlewareFunc{})
+
+// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on
+// a handler.
+type BuildStep struct {
+ ids *orderedIDs
+}
+
+// NewBuildStep returns a BuildStep ready to have build middleware added to
+// it.
+func NewBuildStep() *BuildStep {
+ return &BuildStep{
+ ids: newOrderedIDs(baseOrderedItems),
+ }
+}
+
+var _ Middleware = (*BuildStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *BuildStep) ID() string {
+ return "Build stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h BuildHandler = buildWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedBuildHandler{
+ Next: h,
+ With: order[i].(BuildMiddleware),
+ }
+ }
+
+ sIn := BuildInput{
+ Request: in,
+ }
+
+ res, metadata, err := h.HandleBuild(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *BuildStep) Get(id string) (BuildMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(BuildMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware id.
+// Returns an error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or an error if the middleware to be removed
+// doesn't exist.
+func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(BuildMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *BuildStep) Remove(id string) (BuildMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(BuildMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *BuildStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *BuildStep) Clear() {
+ s.ids.Clear()
+}
+
+type buildWrapHandler struct {
+ Next Handler
+}
+
+var _ BuildHandler = (*buildWrapHandler)(nil)
+
+// HandleBuild implements BuildHandler, converts types and delegates to underlying
+// generic handler.
+func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
+ out BuildOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Request)
+ return BuildOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedBuildHandler struct {
+ Next BuildHandler
+ With BuildMiddleware
+}
+
+var _ BuildHandler = (*decoratedBuildHandler)(nil)
+
+func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
+ out BuildOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleBuild(ctx, in, h.Next)
+}
+
+// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
+type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)
+
+// HandleBuild invokes the wrapped function with the provided arguments.
+func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
+ return b(ctx, in)
+}
+
+var _ BuildHandler = BuildHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
new file mode 100644
index 000000000..9a6679a59
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -0,0 +1,218 @@
+package middleware
+
+import (
+ "context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeMiddleware to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+ Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+ RawResponse interface{}
+ Result interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+ HandleDeserialize(ctx context.Context, in DeserializeInput) (
+ out DeserializeOutput, metadata Metadata, err error,
+ )
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// deserialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+ // ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+ out DeserializeOutput, metadata Metadata, err error,
+ )
+}
+
+// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware {
+ return deserializeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type deserializeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, DeserializeInput, DeserializeHandler) (
+ DeserializeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s deserializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleDeserialize invokes the middleware Fn.
+func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) (
+ out DeserializeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ DeserializeMiddleware = (deserializeMiddlewareFunc{})
+
+// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be
+// invoked on a handler.
+type DeserializeStep struct {
+ ids *orderedIDs
+}
+
+// NewDeserializeStep returns a DeserializeStep ready to have deserialize
+// middleware added to it.
+func NewDeserializeStep() *DeserializeStep {
+ return &DeserializeStep{
+ // downstream SDK typically has larger Deserialize step
+ ids: newOrderedIDs(baseOrderedItems * 2),
+ }
+}
+
+var _ Middleware = (*DeserializeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *DeserializeStep) ID() string {
+ return "Deserialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h DeserializeHandler = deserializeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedDeserializeHandler{
+ Next: h,
+ With: order[i].(DeserializeMiddleware),
+ }
+ }
+
+ sIn := DeserializeInput{
+ Request: in,
+ }
+
+ res, metadata, err := h.HandleDeserialize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(DeserializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(DeserializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+ s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+ Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// HandleDeserialize implements DeserializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+ out DeserializeOutput, metadata Metadata, err error,
+) {
+ resp, metadata, err := w.Next.Handle(ctx, in.Request)
+ return DeserializeOutput{
+ RawResponse: resp,
+ }, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+ Next DeserializeHandler
+ With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+ out DeserializeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+ return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
new file mode 100644
index 000000000..76eab2490
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -0,0 +1,212 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+ Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+ Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+ HandleFinalize(ctx context.Context, in FinalizeInput) (
+ out FinalizeOutput, metadata Metadata, err error,
+ )
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// finalize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+ // ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // HandleFinalize invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) (
+ out FinalizeOutput, metadata Metadata, err error,
+ )
+}
+
+// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware {
+ return finalizeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type finalizeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, FinalizeInput, FinalizeHandler) (
+ FinalizeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s finalizeMiddlewareFunc) ID() string { return s.id }
+
+// HandleFinalize invokes the middleware Fn.
+func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) (
+ out FinalizeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ FinalizeMiddleware = (finalizeMiddlewareFunc{})
+
+// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be
+// invoked on a handler.
+type FinalizeStep struct {
+ ids *orderedIDs
+}
+
+// NewFinalizeStep returns a FinalizeStep ready to have finalize middleware
+// added to it.
+func NewFinalizeStep() *FinalizeStep {
+ return &FinalizeStep{
+ // downstream SDK typically has larger Finalize step
+ ids: newOrderedIDs(baseOrderedItems * 2),
+ }
+}
+
+var _ Middleware = (*FinalizeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *FinalizeStep) ID() string {
+ return "Finalize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h FinalizeHandler = finalizeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedFinalizeHandler{
+ Next: h,
+ With: order[i].(FinalizeMiddleware),
+ }
+ }
+
+ sIn := FinalizeInput{
+ Request: in,
+ }
+
+ res, metadata, err := h.HandleFinalize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(FinalizeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(FinalizeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(FinalizeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *FinalizeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *FinalizeStep) Clear() {
+ s.ids.Clear()
+}
+
+type finalizeWrapHandler struct {
+ Next Handler
+}
+
+var _ FinalizeHandler = (*finalizeWrapHandler)(nil)
+
+// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying
+// generic handler.
+func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) (
+ out FinalizeOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Request)
+ return FinalizeOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedFinalizeHandler struct {
+ Next FinalizeHandler
+ With FinalizeMiddleware
+}
+
+var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil)
+
+func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) (
+ out FinalizeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleFinalize(ctx, in, h.Next)
+}
+
+// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler.
+type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error)
+
+// HandleFinalize invokes the wrapped function with the given arguments.
+func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) {
+ return f(ctx, in)
+}
+
+var _ FinalizeHandler = FinalizeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
new file mode 100644
index 000000000..312be3a33
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// InitializeInput wraps the input parameters for the InitializeMiddlewares to
+// consume. InitializeMiddleware may modify the parameter value before
+// forwarding it along to the next InitializeHandler.
+type InitializeInput struct {
+ Parameters interface{}
+}
+
+// InitializeOutput provides the result returned by the next InitializeHandler.
+type InitializeOutput struct {
+ Result interface{}
+}
+
+// InitializeHandler provides the interface for the next handler the
+// InitializeMiddleware will call in the middleware chain.
+type InitializeHandler interface {
+ HandleInitialize(ctx context.Context, in InitializeInput) (
+ out InitializeOutput, metadata Metadata, err error,
+ )
+}
+
+// InitializeMiddleware provides the interface for middleware specific to the
+// initialize step. Delegates to the next InitializeHandler for further
+// processing.
+type InitializeMiddleware interface {
+ // ID returns a unique ID for the middleware in the InitializeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // HandleInitialize invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+ out InitializeOutput, metadata Metadata, err error,
+ )
+}
+
+// InitializeMiddlewareFunc returns an InitializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware {
+ return initializeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type initializeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, InitializeInput, InitializeHandler) (
+ InitializeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s initializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleInitialize invokes the middleware Fn.
+func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ InitializeMiddleware = (initializeMiddlewareFunc{})
+
+// InitializeStep provides the ordered grouping of InitializeMiddleware to be
+// invoked on a handler.
+type InitializeStep struct {
+ ids *orderedIDs
+}
+
+// NewInitializeStep returns an InitializeStep ready to have middleware for
+// initialization added to it.
+func NewInitializeStep() *InitializeStep {
+ return &InitializeStep{
+ ids: newOrderedIDs(baseOrderedItems),
+ }
+}
+
+var _ Middleware = (*InitializeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *InitializeStep) ID() string {
+ return "Initialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h InitializeHandler = initializeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedInitializeHandler{
+ Next: h,
+ With: order[i].(InitializeMiddleware),
+ }
+ }
+
+ sIn := InitializeInput{
+ Parameters: in,
+ }
+
+ res, metadata, err := h.HandleInitialize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(InitializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(InitializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(InitializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *InitializeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *InitializeStep) Clear() {
+ s.ids.Clear()
+}
+
+type initializeWrapHandler struct {
+ Next Handler
+}
+
+var _ InitializeHandler = (*initializeWrapHandler)(nil)
+
+// HandleInitialize implements InitializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Parameters)
+ return InitializeOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedInitializeHandler struct {
+ Next InitializeHandler
+ With InitializeMiddleware
+}
+
+var _ InitializeHandler = (*decoratedInitializeHandler)(nil)
+
+func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+ out InitializeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleInitialize(ctx, in, h.Next)
+}
+
+// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler.
+type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error)
+
+// HandleInitialize calls the wrapped function with the provided arguments.
+func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) {
+ return i(ctx, in)
+}
+
+var _ InitializeHandler = InitializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
new file mode 100644
index 000000000..a4ce4bee3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
@@ -0,0 +1,219 @@
+package middleware
+
+import "context"
+
+// SerializeInput provides the input parameters for the SerializeMiddleware to
+// consume. SerializeMiddleware may modify the Request value before forwarding
+// SerializeInput along to the next SerializeHandler. The Parameters member
+// should not be modified by SerializeMiddleware; InitializeMiddleware is
+// responsible for modifying the provided Parameters value.
+type SerializeInput struct {
+ Parameters interface{}
+ Request interface{}
+}
+
+// SerializeOutput provides the result returned by the next SerializeHandler.
+type SerializeOutput struct {
+ Result interface{}
+}
+
+// SerializeHandler provides the interface for the next handler the
+// SerializeMiddleware will call in the middleware chain.
+type SerializeHandler interface {
+ HandleSerialize(ctx context.Context, in SerializeInput) (
+ out SerializeOutput, metadata Metadata, err error,
+ )
+}
+
+// SerializeMiddleware provides the interface for middleware specific to the
+// serialize step. Delegates to the next SerializeHandler for further
+// processing.
+type SerializeMiddleware interface {
+ // ID returns a unique ID for the middleware in the SerializeStep. The step does not
+ // allow duplicate IDs.
+ ID() string
+
+ // HandleSerialize invokes the middleware behavior which must delegate to the next handler
+ // for the middleware chain to continue. The method must return a result or
+ // error to its caller.
+ HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+ out SerializeOutput, metadata Metadata, err error,
+ )
+}
+
+// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID
+// provided, and the func to be invoked.
+func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware {
+ return serializeMiddlewareFunc{
+ id: id,
+ fn: fn,
+ }
+}
+
+type serializeMiddlewareFunc struct {
+ // Unique ID for the middleware.
+ id string
+
+ // Middleware function to be called.
+ fn func(context.Context, SerializeInput, SerializeHandler) (
+ SerializeOutput, Metadata, error,
+ )
+}
+
+// ID returns the unique ID for the middleware.
+func (s serializeMiddlewareFunc) ID() string { return s.id }
+
+// HandleSerialize invokes the middleware Fn.
+func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) (
+ out SerializeOutput, metadata Metadata, err error,
+) {
+ return s.fn(ctx, in, next)
+}
+
+var _ SerializeMiddleware = (serializeMiddlewareFunc{})
+
+// SerializeStep provides the ordered grouping of SerializeMiddleware to be
+// invoked on a handler.
+type SerializeStep struct {
+ newRequest func() interface{}
+ ids *orderedIDs
+}
+
+// NewSerializeStep returns a SerializeStep ready to have serialize
+// middleware added to it. The newRequest func parameter is used to
+// initialize the transport specific request for the stack SerializeStep to
+// serialize the input parameters into.
+func NewSerializeStep(newRequest func() interface{}) *SerializeStep {
+ return &SerializeStep{
+ ids: newOrderedIDs(baseOrderedItems),
+ newRequest: newRequest,
+ }
+}
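+
+// A minimal sketch, assuming the HTTP transport (smithyhttp is
+// github.com/aws/smithy-go/transport/http):
+//
+//	step := middleware.NewSerializeStep(func() interface{} {
+//		return smithyhttp.NewStackRequest()
+//	})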
+
+var _ Middleware = (*SerializeStep)(nil)
+
+// ID returns the unique ID of the step as a middleware.
+func (s *SerializeStep) ID() string {
+ return "Serialize stack step"
+}
+
+// HandleMiddleware invokes the middleware by decorating the next handler
+// provided. Returns the result of the middleware and handler being invoked.
+//
+// Implements Middleware interface.
+func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) (
+ out interface{}, metadata Metadata, err error,
+) {
+ order := s.ids.GetOrder()
+
+ var h SerializeHandler = serializeWrapHandler{Next: next}
+ for i := len(order) - 1; i >= 0; i-- {
+ h = decoratedSerializeHandler{
+ Next: h,
+ With: order[i].(SerializeMiddleware),
+ }
+ }
+
+ sIn := SerializeInput{
+ Parameters: in,
+ Request: s.newRequest(),
+ }
+
+ res, metadata, err := h.HandleSerialize(ctx, sIn)
+ return res.Result, metadata, err
+}
+
+// Get retrieves the middleware identified by id. If the middleware is not present, returns false.
+func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) {
+ get, ok := s.ids.Get(id)
+ if !ok {
+ return nil, false
+ }
+ return get.(SerializeMiddleware), ok
+}
+
+// Add injects the middleware to the relative position of the middleware group.
+// Returns an error if the middleware already exists.
+func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error {
+ return s.ids.Add(m, pos)
+}
+
+// Insert injects the middleware relative to an existing middleware ID.
+// Returns error if the original middleware does not exist, or the middleware
+// being added already exists.
+func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error {
+ return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) {
+ removed, err := s.ids.Swap(id, m)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(SerializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) {
+ removed, err := s.ids.Remove(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return removed.(SerializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *SerializeStep) List() []string {
+ return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *SerializeStep) Clear() {
+ s.ids.Clear()
+}
+
+type serializeWrapHandler struct {
+ Next Handler
+}
+
+var _ SerializeHandler = (*serializeWrapHandler)(nil)
+
+// HandleSerialize implements SerializeHandler, converts types and delegates to underlying
+// generic handler.
+func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) (
+ out SerializeOutput, metadata Metadata, err error,
+) {
+ res, metadata, err := w.Next.Handle(ctx, in.Request)
+ return SerializeOutput{
+ Result: res,
+ }, metadata, err
+}
+
+type decoratedSerializeHandler struct {
+ Next SerializeHandler
+ With SerializeMiddleware
+}
+
+var _ SerializeHandler = (*decoratedSerializeHandler)(nil)
+
+func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) (
+ out SerializeOutput, metadata Metadata, err error,
+) {
+ return h.With.HandleSerialize(ctx, in, h.Next)
+}
+
+// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler.
+type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error)
+
+// HandleSerialize calls the wrapped function with the provided arguments.
+func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) {
+ return s(ctx, in)
+}
+
+var _ SerializeHandler = SerializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml
new file mode 100644
index 000000000..aac582fa2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/modman.toml
@@ -0,0 +1,9 @@
+[dependencies]
+
+[modules]
+
+ [modules.codegen]
+ no_tag = true
+
+ [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"]
+ no_tag = true
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
new file mode 100644
index 000000000..004d78f21
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
@@ -0,0 +1,30 @@
+package requestcompression
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+)
+
+func gzipCompress(input io.Reader) ([]byte, error) {
+ var b bytes.Buffer
+ w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create gzip writer, %v", err)
+ }
+
+ inBytes, err := io.ReadAll(input)
+ if err != nil {
+ return nil, fmt.Errorf("failed read payload to compress, %v", err)
+ }
+
+ if _, err = w.Write(inBytes); err != nil {
+ return nil, fmt.Errorf("failed to write payload to be compressed, %v", err)
+ }
+ if err = w.Close(); err != nil {
+ return nil, fmt.Errorf("failed to flush payload being compressed, %v", err)
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
new file mode 100644
index 000000000..06c16afc1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
@@ -0,0 +1,52 @@
+package requestcompression
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "net/http"
+)
+
+const captureUncompressedRequestID = "CaptureUncompressedRequest"
+
+// AddCaptureUncompressedRequestMiddleware captures the HTTP request payload
+// before compression so the uncompressed bytes can be inspected.
+func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error {
+ return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{
+ buf: buf,
+ }, "RequestCompression", middleware.Before)
+}
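+
+// A usage sketch (test-style wiring; the stack variable is assumed):
+//
+//	var buf bytes.Buffer
+//	if err := requestcompression.AddCaptureUncompressedRequestMiddleware(stack, &buf); err != nil {
+//		// handle error
+//	}
+//	// ... invoke the operation; buf then holds the uncompressed payload.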
+
+type captureUncompressedRequestMiddleware struct {
+ req *http.Request
+ buf *bytes.Buffer
+ bytes []byte
+}
+
+// ID returns the ID of the captureUncompressedRequestMiddleware.
+func (*captureUncompressedRequestMiddleware) ID() string {
+ return captureUncompressedRequestID
+}
+
+// HandleSerialize captures request payload before it is compressed by request compression middleware
+func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ output middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, fmt.Errorf("error when retrieving http request")
+ }
+
+ _, err = io.Copy(m.buf, request.GetStream())
+ if err != nil {
+ return output, metadata, fmt.Errorf("error when copying http request stream: %q", err)
+ }
+ if err = request.RewindStream(); err != nil {
+ return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err)
+ }
+
+ return next.HandleSerialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
new file mode 100644
index 000000000..7c4147603
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
@@ -0,0 +1,103 @@
+// Package requestcompression implements runtime support for smithy-modeled
+// request compression.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package requestcompression
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
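+// MaxRequestMinCompressSizeBytes is the maximum allowed value, 10 MiB, for
+// the minimum request body size that triggers compression.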
+const MaxRequestMinCompressSizeBytes = 10485760
+
+// Enumeration values for supported compress Algorithms.
+const (
+ GZIP = "gzip"
+)
+
+type compressFunc func(io.Reader) ([]byte, error)
+
+var allowedAlgorithms = map[string]compressFunc{
+ GZIP: gzipCompress,
+}
+
+// AddRequestCompression adds the requestCompression middleware to the
+// operation stack.
+func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error {
+ return stack.Serialize.Add(&requestCompression{
+ disableRequestCompression: disabled,
+ requestMinCompressSizeBytes: minBytes,
+ compressAlgorithms: algorithms,
+ }, middleware.After)
+}
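+
+// A usage sketch (the threshold is illustrative): gzip-compress request
+// bodies of at least 10 KiB:
+//
+//	err := requestcompression.AddRequestCompression(
+//		stack, false, 10240, []string{requestcompression.GZIP})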
+
+type requestCompression struct {
+ disableRequestCompression bool
+ requestMinCompressSizeBytes int64
+ compressAlgorithms []string
+}
+
+// ID returns the ID of the middleware
+func (m requestCompression) ID() string {
+ return "RequestCompression"
+}
+
+// HandleSerialize gzip-compresses the request's stream/body if enabled by the
+// config fields.
+func (m requestCompression) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.disableRequestCompression {
+ return next.HandleSerialize(ctx, in)
+ }
+	// Validate requestMinCompressSizeBytes again here, in case service client
+	// configuration left it out of range.
+ if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes {
+ return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ for _, algorithm := range m.compressAlgorithms {
+ compressFunc := allowedAlgorithms[algorithm]
+ if compressFunc != nil {
+ if stream := req.GetStream(); stream != nil {
+ size, found, err := req.StreamLength()
+ if err != nil {
+ return out, metadata, fmt.Errorf("error while finding request stream length, %v", err)
+ } else if !found || size < m.requestMinCompressSizeBytes {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ compressedBytes, err := compressFunc(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to compress request stream, %v", err)
+ }
+
+ var newReq *http.Request
+ if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil {
+ return out, metadata, fmt.Errorf("failed to set request stream, %v", err)
+ }
+ *req = *newReq
+
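+				// Append the algorithm to any existing Content-Encoding value,
+				// e.g. "br" becomes "br, gzip"; otherwise set it outright.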
+ if val := req.Header.Get("Content-Encoding"); val != "" {
+ req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm))
+ } else {
+ req.Header.Set("Content-Encoding", algorithm)
+ }
+ }
+ break
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go
new file mode 100644
index 000000000..68df4c4e0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/properties.go
@@ -0,0 +1,69 @@
+package smithy
+
+import "maps"
+
+// PropertiesReader provides an interface for reading metadata from the
+// underlying metadata container.
+type PropertiesReader interface {
+ Get(key any) any
+}
+
+// Properties provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and Set will panic if a key is not comparable.
+//
+// The zero value for a Properties instance is ready for reads/writes without
+// any additional initialization.
+type Properties struct {
+ values map[any]any
+}
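+
+// A minimal usage sketch (the regionKey type is hypothetical):
+//
+//	type regionKey struct{}
+//
+//	var props smithy.Properties // the zero value is ready to use
+//	props.Set(regionKey{}, "us-west-2")
+//	region, _ := props.Get(regionKey{}).(string) // "us-west-2"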
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m *Properties) Get(key any) any {
+ m.lazyInit()
+ return m.values[key]
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Panics if the key type is not comparable.
+func (m *Properties) Set(key, value any) {
+ m.lazyInit()
+ m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m *Properties) Has(key any) bool {
+ m.lazyInit()
+ _, ok := m.values[key]
+ return ok
+}
+
+// SetAll accepts all of the given Properties into the receiver, overwriting
+// any existing keys in the case of conflicts.
+func (m *Properties) SetAll(other *Properties) {
+ if other.values == nil {
+ return
+ }
+
+ m.lazyInit()
+ for k, v := range other.values {
+ m.values[k] = v
+ }
+}
+
+// Values returns a shallow clone of the property set's values.
+func (m *Properties) Values() map[any]any {
+ return maps.Clone(m.values)
+}
+
+func (m *Properties) lazyInit() {
+ if m.values == nil {
+ m.values = map[any]any{}
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go
new file mode 100644
index 000000000..bc1f69961
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/doc.go
@@ -0,0 +1,5 @@
+// Package ptr provides utilities for converting scalar literal type values to and from pointers inline.
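+//
+// For example (illustrative), dereferencing a possibly-nil pointer yields the
+// type's zero value instead of panicking:
+//
+//	var s *string
+//	_ = ptr.ToString(s) // ""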
+package ptr
+
+//go:generate go run -tags codegen generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go
new file mode 100644
index 000000000..a2845bb2c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go
@@ -0,0 +1,601 @@
+// Code generated by smithy-go/ptr/generate.go DO NOT EDIT.
+package ptr
+
+import (
+ "time"
+)
+
+// ToBool returns bool value dereferenced if the passed
+// in pointer was not nil. Returns a bool zero value if the
+// pointer was nil.
+func ToBool(p *bool) (v bool) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToBoolSlice returns a slice of bool values, that are
+// dereferenced if the passed in pointer was not nil. Returns a bool
+// zero value if the pointer was nil.
+func ToBoolSlice(vs []*bool) []bool {
+ ps := make([]bool, len(vs))
+ for i, v := range vs {
+ ps[i] = ToBool(v)
+ }
+
+ return ps
+}
+
+// ToBoolMap returns a map of bool values, that are
+// dereferenced if the passed in pointer was not nil. The bool
+// zero value is used if the pointer was nil.
+func ToBoolMap(vs map[string]*bool) map[string]bool {
+ ps := make(map[string]bool, len(vs))
+ for k, v := range vs {
+ ps[k] = ToBool(v)
+ }
+
+ return ps
+}
+
+// ToByte returns byte value dereferenced if the passed
+// in pointer was not nil. Returns a byte zero value if the
+// pointer was nil.
+func ToByte(p *byte) (v byte) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToByteSlice returns a slice of byte values, that are
+// dereferenced if the passed in pointer was not nil. Returns a byte
+// zero value if the pointer was nil.
+func ToByteSlice(vs []*byte) []byte {
+ ps := make([]byte, len(vs))
+ for i, v := range vs {
+ ps[i] = ToByte(v)
+ }
+
+ return ps
+}
+
+// ToByteMap returns a map of byte values, that are
+// dereferenced if the passed in pointer was not nil. The byte
+// zero value is used if the pointer was nil.
+func ToByteMap(vs map[string]*byte) map[string]byte {
+ ps := make(map[string]byte, len(vs))
+ for k, v := range vs {
+ ps[k] = ToByte(v)
+ }
+
+ return ps
+}
+
+// ToString returns string value dereferenced if the passed
+// in pointer was not nil. Returns a string zero value if the
+// pointer was nil.
+func ToString(p *string) (v string) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToStringSlice returns a slice of string values, that are
+// dereferenced if the passed in pointer was not nil. Returns a string
+// zero value if the pointer was nil.
+func ToStringSlice(vs []*string) []string {
+ ps := make([]string, len(vs))
+ for i, v := range vs {
+ ps[i] = ToString(v)
+ }
+
+ return ps
+}
+
+// ToStringMap returns a map of string values, that are
+// dereferenced if the passed in pointer was not nil. The string
+// zero value is used if the pointer was nil.
+func ToStringMap(vs map[string]*string) map[string]string {
+ ps := make(map[string]string, len(vs))
+ for k, v := range vs {
+ ps[k] = ToString(v)
+ }
+
+ return ps
+}
+
+// ToInt returns int value dereferenced if the passed
+// in pointer was not nil. Returns an int zero value if the
+// pointer was nil.
+func ToInt(p *int) (v int) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToIntSlice returns a slice of int values, that are
+// dereferenced if the passed in pointer was not nil. Returns an int
+// zero value if the pointer was nil.
+func ToIntSlice(vs []*int) []int {
+ ps := make([]int, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt(v)
+ }
+
+ return ps
+}
+
+// ToIntMap returns a map of int values, that are
+// dereferenced if the passed in pointer was not nil. The int
+// zero value is used if the pointer was nil.
+func ToIntMap(vs map[string]*int) map[string]int {
+ ps := make(map[string]int, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt(v)
+ }
+
+ return ps
+}
+
+// ToInt8 returns int8 value dereferenced if the passed
+// in pointer was not nil. Returns an int8 zero value if the
+// pointer was nil.
+func ToInt8(p *int8) (v int8) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt8Slice returns a slice of int8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns an int8
+// zero value if the pointer was nil.
+func ToInt8Slice(vs []*int8) []int8 {
+ ps := make([]int8, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt8(v)
+ }
+
+ return ps
+}
+
+// ToInt8Map returns a map of int8 values, that are
+// dereferenced if the passed in pointer was not nil. The int8
+// zero value is used if the pointer was nil.
+func ToInt8Map(vs map[string]*int8) map[string]int8 {
+ ps := make(map[string]int8, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt8(v)
+ }
+
+ return ps
+}
+
+// ToInt16 returns int16 value dereferenced if the passed
+// in pointer was not nil. Returns an int16 zero value if the
+// pointer was nil.
+func ToInt16(p *int16) (v int16) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt16Slice returns a slice of int16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns an int16
+// zero value if the pointer was nil.
+func ToInt16Slice(vs []*int16) []int16 {
+ ps := make([]int16, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt16(v)
+ }
+
+ return ps
+}
+
+// ToInt16Map returns a map of int16 values, that are
+// dereferenced if the passed in pointer was not nil. The int16
+// zero value is used if the pointer was nil.
+func ToInt16Map(vs map[string]*int16) map[string]int16 {
+ ps := make(map[string]int16, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt16(v)
+ }
+
+ return ps
+}
+
+// ToInt32 returns int32 value dereferenced if the passed
+// in pointer was not nil. Returns an int32 zero value if the
+// pointer was nil.
+func ToInt32(p *int32) (v int32) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt32Slice returns a slice of int32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns an int32
+// zero value if the pointer was nil.
+func ToInt32Slice(vs []*int32) []int32 {
+ ps := make([]int32, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt32(v)
+ }
+
+ return ps
+}
+
+// ToInt32Map returns a map of int32 values, that are
+// dereferenced if the passed in pointer was not nil. The int32
+// zero value is used if the pointer was nil.
+func ToInt32Map(vs map[string]*int32) map[string]int32 {
+ ps := make(map[string]int32, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt32(v)
+ }
+
+ return ps
+}
+
+// ToInt64 returns int64 value dereferenced if the passed
+// in pointer was not nil. Returns an int64 zero value if the
+// pointer was nil.
+func ToInt64(p *int64) (v int64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToInt64Slice returns a slice of int64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns an int64
+// zero value if the pointer was nil.
+func ToInt64Slice(vs []*int64) []int64 {
+ ps := make([]int64, len(vs))
+ for i, v := range vs {
+ ps[i] = ToInt64(v)
+ }
+
+ return ps
+}
+
+// ToInt64Map returns a map of int64 values, that are
+// dereferenced if the passed in pointer was not nil. The int64
+// zero value is used if the pointer was nil.
+func ToInt64Map(vs map[string]*int64) map[string]int64 {
+ ps := make(map[string]int64, len(vs))
+ for k, v := range vs {
+ ps[k] = ToInt64(v)
+ }
+
+ return ps
+}
+
+// ToUint returns uint value dereferenced if the passed
+// in pointer was not nil. Returns a uint zero value if the
+// pointer was nil.
+func ToUint(p *uint) (v uint) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUintSlice returns a slice of uint values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint
+// zero value if the pointer was nil.
+func ToUintSlice(vs []*uint) []uint {
+ ps := make([]uint, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint(v)
+ }
+
+ return ps
+}
+
+// ToUintMap returns a map of uint values, that are
+// dereferenced if the passed in pointer was not nil. The uint
+// zero value is used if the pointer was nil.
+func ToUintMap(vs map[string]*uint) map[string]uint {
+ ps := make(map[string]uint, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint(v)
+ }
+
+ return ps
+}
+
+// ToUint8 returns uint8 value dereferenced if the passed
+// in pointer was not nil. Returns a uint8 zero value if the
+// pointer was nil.
+func ToUint8(p *uint8) (v uint8) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint8Slice returns a slice of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint8
+// zero value if the pointer was nil.
+func ToUint8Slice(vs []*uint8) []uint8 {
+ ps := make([]uint8, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint8(v)
+ }
+
+ return ps
+}
+
+// ToUint8Map returns a map of uint8 values, that are
+// dereferenced if the passed in pointer was not nil. The uint8
+// zero value is used if the pointer was nil.
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 {
+ ps := make(map[string]uint8, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint8(v)
+ }
+
+ return ps
+}
+
+// ToUint16 returns uint16 value dereferenced if the passed
+// in pointer was not nil. Returns a uint16 zero value if the
+// pointer was nil.
+func ToUint16(p *uint16) (v uint16) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint16Slice returns a slice of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint16
+// zero value if the pointer was nil.
+func ToUint16Slice(vs []*uint16) []uint16 {
+ ps := make([]uint16, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint16(v)
+ }
+
+ return ps
+}
+
+// ToUint16Map returns a map of uint16 values, that are
+// dereferenced if the passed in pointer was not nil. The uint16
+// zero value is used if the pointer was nil.
+func ToUint16Map(vs map[string]*uint16) map[string]uint16 {
+ ps := make(map[string]uint16, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint16(v)
+ }
+
+ return ps
+}
+
+// ToUint32 returns uint32 value dereferenced if the passed
+// in pointer was not nil. Returns a uint32 zero value if the
+// pointer was nil.
+func ToUint32(p *uint32) (v uint32) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint32Slice returns a slice of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint32
+// zero value if the pointer was nil.
+func ToUint32Slice(vs []*uint32) []uint32 {
+ ps := make([]uint32, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint32(v)
+ }
+
+ return ps
+}
+
+// ToUint32Map returns a map of uint32 values, that are
+// dereferenced if the passed in pointer was not nil. The uint32
+// zero value is used if the pointer was nil.
+func ToUint32Map(vs map[string]*uint32) map[string]uint32 {
+ ps := make(map[string]uint32, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint32(v)
+ }
+
+ return ps
+}
+
+// ToUint64 returns uint64 value dereferenced if the passed
+// in pointer was not nil. Returns a uint64 zero value if the
+// pointer was nil.
+func ToUint64(p *uint64) (v uint64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToUint64Slice returns a slice of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a uint64
+// zero value if the pointer was nil.
+func ToUint64Slice(vs []*uint64) []uint64 {
+ ps := make([]uint64, len(vs))
+ for i, v := range vs {
+ ps[i] = ToUint64(v)
+ }
+
+ return ps
+}
+
+// ToUint64Map returns a map of uint64 values, that are
+// dereferenced if the passed in pointer was not nil. The uint64
+// zero value is used if the pointer was nil.
+func ToUint64Map(vs map[string]*uint64) map[string]uint64 {
+ ps := make(map[string]uint64, len(vs))
+ for k, v := range vs {
+ ps[k] = ToUint64(v)
+ }
+
+ return ps
+}
+
+// ToFloat32 returns float32 value dereferenced if the passed
+// in pointer was not nil. Returns a float32 zero value if the
+// pointer was nil.
+func ToFloat32(p *float32) (v float32) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToFloat32Slice returns a slice of float32 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float32
+// zero value if the pointer was nil.
+func ToFloat32Slice(vs []*float32) []float32 {
+ ps := make([]float32, len(vs))
+ for i, v := range vs {
+ ps[i] = ToFloat32(v)
+ }
+
+ return ps
+}
+
+// ToFloat32Map returns a map of float32 values, that are
+// dereferenced if the passed in pointer was not nil. The float32
+// zero value is used if the pointer was nil.
+func ToFloat32Map(vs map[string]*float32) map[string]float32 {
+ ps := make(map[string]float32, len(vs))
+ for k, v := range vs {
+ ps[k] = ToFloat32(v)
+ }
+
+ return ps
+}
+
+// ToFloat64 returns float64 value dereferenced if the passed
+// in pointer was not nil. Returns a float64 zero value if the
+// pointer was nil.
+func ToFloat64(p *float64) (v float64) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToFloat64Slice returns a slice of float64 values, that are
+// dereferenced if the passed in pointer was not nil. Returns a float64
+// zero value if the pointer was nil.
+func ToFloat64Slice(vs []*float64) []float64 {
+ ps := make([]float64, len(vs))
+ for i, v := range vs {
+ ps[i] = ToFloat64(v)
+ }
+
+ return ps
+}
+
+// ToFloat64Map returns a map of float64 values, that are
+// dereferenced if the passed in pointer was not nil. The float64
+// zero value is used if the pointer was nil.
+func ToFloat64Map(vs map[string]*float64) map[string]float64 {
+ ps := make(map[string]float64, len(vs))
+ for k, v := range vs {
+ ps[k] = ToFloat64(v)
+ }
+
+ return ps
+}
+
+// ToTime returns time.Time value dereferenced if the passed
+// in pointer was not nil. Returns a time.Time zero value if the
+// pointer was nil.
+func ToTime(p *time.Time) (v time.Time) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToTimeSlice returns a slice of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Time
+// zero value if the pointer was nil.
+func ToTimeSlice(vs []*time.Time) []time.Time {
+ ps := make([]time.Time, len(vs))
+ for i, v := range vs {
+ ps[i] = ToTime(v)
+ }
+
+ return ps
+}
+
+// ToTimeMap returns a map of time.Time values, that are
+// dereferenced if the passed in pointer was not nil. The time.Time
+// zero value is used if the pointer was nil.
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time {
+ ps := make(map[string]time.Time, len(vs))
+ for k, v := range vs {
+ ps[k] = ToTime(v)
+ }
+
+ return ps
+}
+
+// ToDuration returns time.Duration value dereferenced if the passed
+// in pointer was not nil. Returns a time.Duration zero value if the
+// pointer was nil.
+func ToDuration(p *time.Duration) (v time.Duration) {
+ if p == nil {
+ return v
+ }
+
+ return *p
+}
+
+// ToDurationSlice returns a slice of time.Duration values, that are
+// dereferenced if the passed in pointer was not nil. Returns a time.Duration
+// zero value if the pointer was nil.
+func ToDurationSlice(vs []*time.Duration) []time.Duration {
+ ps := make([]time.Duration, len(vs))
+ for i, v := range vs {
+ ps[i] = ToDuration(v)
+ }
+
+ return ps
+}
+
+// ToDurationMap returns a map of time.Duration values, that are
+// dereferenced if the passed in pointer was not nil. The time.Duration
+// zero value is used if the pointer was nil.
+func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration {
+ ps := make(map[string]time.Duration, len(vs))
+ for k, v := range vs {
+ ps[k] = ToDuration(v)
+ }
+
+ return ps
+}
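+
+// Example (illustrative sketch, not upstream smithy-go code): the To* helpers
+// above let callers read optional, pointer-typed values without nil checks at
+// every call site. Assuming this package is imported as ptr:
+//
+//	var timeout *time.Duration      // an unset optional value
+//	d := ptr.ToDuration(timeout)    // 0, the time.Duration zero value
+//	n := ptr.ToInt64(ptr.Int64(42)) // 42, round-tripped through a pointer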
diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go
new file mode 100644
index 000000000..97f01011e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go
@@ -0,0 +1,83 @@
+//go:build codegen
+// +build codegen
+
+package ptr
+
+import "strings"
+
+func GetScalars() Scalars {
+ return Scalars{
+ {Type: "bool"},
+ {Type: "byte"},
+ {Type: "string"},
+ {Type: "int"},
+ {Type: "int8"},
+ {Type: "int16"},
+ {Type: "int32"},
+ {Type: "int64"},
+ {Type: "uint"},
+ {Type: "uint8"},
+ {Type: "uint16"},
+ {Type: "uint32"},
+ {Type: "uint64"},
+ {Type: "float32"},
+ {Type: "float64"},
+ {Type: "Time", Import: &Import{Path: "time"}},
+ {Type: "Duration", Import: &Import{Path: "time"}},
+ }
+}
+
+// Import provides the import path and optional alias
+type Import struct {
+ Path string
+ Alias string
+}
+
+// Package returns the Go package name for the import. Returns alias if set.
+func (i Import) Package() string {
+ if v := i.Alias; len(v) != 0 {
+ return v
+ }
+
+ if v := i.Path; len(v) != 0 {
+ parts := strings.Split(v, "/")
+ pkg := parts[len(parts)-1]
+ return pkg
+ }
+
+ return ""
+}
+
+// Scalar provides the definition of a type to generate pointer utilities for.
+type Scalar struct {
+ Type string
+ Import *Import
+}
+
+// Name returns the exported function name for the type.
+func (t Scalar) Name() string {
+ return strings.Title(t.Type)
+}
+
+// Symbol returns the scalar's Go symbol with path if needed.
+func (t Scalar) Symbol() string {
+ if t.Import != nil {
+ return t.Import.Package() + "." + t.Type
+ }
+ return t.Type
+}
+
+// Scalars is a list of scalars.
+type Scalars []Scalar
+
+// Imports returns all imports for the scalars.
+func (ts Scalars) Imports() []*Import {
+ imports := []*Import{}
+ for _, t := range ts {
+ if v := t.Import; v != nil {
+ imports = append(imports, v)
+ }
+ }
+
+ return imports
+}
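+
+// Example (illustrative sketch, not upstream smithy-go code): how the
+// generator renders a scalar that carries an import.
+//
+//	s := Scalar{Type: "Time", Import: &Import{Path: "time"}}
+//	s.Name()   // "Time"
+//	s.Symbol() // "time.Time"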
diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go
new file mode 100644
index 000000000..0bfbbecbd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go
@@ -0,0 +1,499 @@
+// Code generated by smithy-go/ptr/generate.go DO NOT EDIT.
+package ptr
+
+import (
+ "time"
+)
+
+// Bool returns a pointer value for the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolSlice returns a slice of bool pointers from the values
+// passed in.
+func BoolSlice(vs []bool) []*bool {
+ ps := make([]*bool, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// BoolMap returns a map of bool pointers from the values
+// passed in.
+func BoolMap(vs map[string]bool) map[string]*bool {
+ ps := make(map[string]*bool, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Byte returns a pointer value for the byte value passed in.
+func Byte(v byte) *byte {
+ return &v
+}
+
+// ByteSlice returns a slice of byte pointers from the values
+// passed in.
+func ByteSlice(vs []byte) []*byte {
+ ps := make([]*byte, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// ByteMap returns a map of byte pointers from the values
+// passed in.
+func ByteMap(vs map[string]byte) map[string]*byte {
+ ps := make(map[string]*byte, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// String returns a pointer value for the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringSlice returns a slice of string pointers from the values
+// passed in.
+func StringSlice(vs []string) []*string {
+ ps := make([]*string, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// StringMap returns a map of string pointers from the values
+// passed in.
+func StringMap(vs map[string]string) map[string]*string {
+ ps := make(map[string]*string, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int returns a pointer value for the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntSlice returns a slice of int pointers from the values
+// passed in.
+func IntSlice(vs []int) []*int {
+ ps := make([]*int, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// IntMap returns a map of int pointers from the values
+// passed in.
+func IntMap(vs map[string]int) map[string]*int {
+ ps := make(map[string]*int, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int8 returns a pointer value for the int8 value passed in.
+func Int8(v int8) *int8 {
+ return &v
+}
+
+// Int8Slice returns a slice of int8 pointers from the values
+// passed in.
+func Int8Slice(vs []int8) []*int8 {
+ ps := make([]*int8, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int8Map returns a map of int8 pointers from the values
+// passed in.
+func Int8Map(vs map[string]int8) map[string]*int8 {
+ ps := make(map[string]*int8, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int16 returns a pointer value for the int16 value passed in.
+func Int16(v int16) *int16 {
+ return &v
+}
+
+// Int16Slice returns a slice of int16 pointers from the values
+// passed in.
+func Int16Slice(vs []int16) []*int16 {
+ ps := make([]*int16, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int16Map returns a map of int16 pointers from the values
+// passed in.
+func Int16Map(vs map[string]int16) map[string]*int16 {
+ ps := make(map[string]*int16, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int32 returns a pointer value for the int32 value passed in.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int32Slice returns a slice of int32 pointers from the values
+// passed in.
+func Int32Slice(vs []int32) []*int32 {
+ ps := make([]*int32, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int32Map returns a map of int32 pointers from the values
+// passed in.
+func Int32Map(vs map[string]int32) map[string]*int32 {
+ ps := make(map[string]*int32, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Int64 returns a pointer value for the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Slice returns a slice of int64 pointers from the values
+// passed in.
+func Int64Slice(vs []int64) []*int64 {
+ ps := make([]*int64, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Int64Map returns a map of int64 pointers from the values
+// passed in.
+func Int64Map(vs map[string]int64) map[string]*int64 {
+ ps := make(map[string]*int64, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint returns a pointer value for the uint value passed in.
+func Uint(v uint) *uint {
+ return &v
+}
+
+// UintSlice returns a slice of uint pointers from the values
+// passed in.
+func UintSlice(vs []uint) []*uint {
+ ps := make([]*uint, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// UintMap returns a map of uint pointers from the values
+// passed in.
+func UintMap(vs map[string]uint) map[string]*uint {
+ ps := make(map[string]*uint, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint8 returns a pointer value for the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+ return &v
+}
+
+// Uint8Slice returns a slice of uint8 pointers from the values
+// passed in.
+func Uint8Slice(vs []uint8) []*uint8 {
+ ps := make([]*uint8, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint8Map returns a map of uint8 pointers from the values
+// passed in.
+func Uint8Map(vs map[string]uint8) map[string]*uint8 {
+ ps := make(map[string]*uint8, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint16 returns a pointer value for the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return &v
+}
+
+// Uint16Slice returns a slice of uint16 pointers from the values
+// passed in.
+func Uint16Slice(vs []uint16) []*uint16 {
+ ps := make([]*uint16, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint16Map returns a map of uint16 pointers from the values
+// passed in.
+func Uint16Map(vs map[string]uint16) map[string]*uint16 {
+ ps := make(map[string]*uint16, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint32 returns a pointer value for the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint32Slice returns a slice of uint32 pointers from the values
+// passed in.
+func Uint32Slice(vs []uint32) []*uint32 {
+ ps := make([]*uint32, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint32Map returns a map of uint32 pointers from the values
+// passed in.
+func Uint32Map(vs map[string]uint32) map[string]*uint32 {
+ ps := make(map[string]*uint32, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Uint64 returns a pointer value for the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// Uint64Slice returns a slice of uint64 pointers from the values
+// passed in.
+func Uint64Slice(vs []uint64) []*uint64 {
+ ps := make([]*uint64, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Uint64Map returns a map of uint64 pointers from the values
+// passed in.
+func Uint64Map(vs map[string]uint64) map[string]*uint64 {
+ ps := make(map[string]*uint64, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Float32 returns a pointer value for the float32 value passed in.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float32Slice returns a slice of float32 pointers from the values
+// passed in.
+func Float32Slice(vs []float32) []*float32 {
+ ps := make([]*float32, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Float32Map returns a map of float32 pointers from the values
+// passed in.
+func Float32Map(vs map[string]float32) map[string]*float32 {
+ ps := make(map[string]*float32, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Float64 returns a pointer value for the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Slice returns a slice of float64 pointers from the values
+// passed in.
+func Float64Slice(vs []float64) []*float64 {
+ ps := make([]*float64, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// Float64Map returns a map of float64 pointers from the values
+// passed in.
+func Float64Map(vs map[string]float64) map[string]*float64 {
+ ps := make(map[string]*float64, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+ ps := make([]*time.Time, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+ ps := make(map[string]*time.Time, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+ return &v
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+ ps := make([]*time.Duration, len(vs))
+ for i, v := range vs {
+ vv := v
+ ps[i] = &vv
+ }
+
+ return ps
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+ ps := make(map[string]*time.Duration, len(vs))
+ for k, v := range vs {
+ vv := v
+ ps[k] = &vv
+ }
+
+ return ps
+}
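+
+// Example (illustrative sketch, not upstream smithy-go code): the constructors
+// above exist because Go cannot take the address of a literal directly; the
+// Slice and Map variants copy each element before taking its address. Assuming
+// the package is imported as ptr:
+//
+//	n := ptr.Int32(100)                                  // *int32 from a literal
+//	m := ptr.StringMap(map[string]string{"env": "prod"}) // map[string]*string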
diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go
new file mode 100644
index 000000000..f8b25d562
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/doc.go
@@ -0,0 +1,3 @@
+// Package rand provides utilities for creating and working with random value
+// generators.
+package rand
diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go
new file mode 100644
index 000000000..9c479f62b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/rand.go
@@ -0,0 +1,31 @@
+package rand
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "math/big"
+)
+
+func init() {
+ Reader = rand.Reader
+}
+
+// Reader provides a random reader that can reset during testing.
+var Reader io.Reader
+
+// Int63n returns an int64 between zero and the value of max, read from an io.Reader source.
+func Int63n(reader io.Reader, max int64) (int64, error) {
+ bi, err := rand.Int(reader, big.NewInt(max))
+ if err != nil {
+ return 0, fmt.Errorf("failed to read random value, %w", err)
+ }
+
+ return bi.Int64(), nil
+}
+
+// CryptoRandInt63n returns a random int64 between zero and the value of max,
+// obtained from the crypto rand source.
+func CryptoRandInt63n(max int64) (int64, error) {
+ return Int63n(Reader, max)
+}
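+
+// Example (illustrative sketch, not upstream smithy-go code): drawing a
+// bounded value from the package-level crypto source, e.g. for retry jitter.
+//
+//	jitter, err := CryptoRandInt63n(1000) // 0 <= jitter < 1000
+//	if err != nil {
+//		// the crypto source failed to produce random bytes
+//	}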
diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go
new file mode 100644
index 000000000..dc81cbc68
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/uuid.go
@@ -0,0 +1,87 @@
+package rand
+
+import (
+ "encoding/hex"
+ "io"
+)
+
+const dash byte = '-'
+
+// UUIDIdempotencyToken provides a utility to get idempotency tokens in the
+// UUID format.
+type UUIDIdempotencyToken struct {
+ uuid *UUID
+}
+
+// NewUUIDIdempotencyToken returns an idempotency token provider that returns
+// tokens in the random UUID format using the reader provided.
+func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken {
+ return &UUIDIdempotencyToken{uuid: NewUUID(r)}
+}
+
+// GetIdempotencyToken returns a random UUID value for use as an idempotency token.
+func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) {
+ return u.uuid.GetUUID()
+}
+
+// UUID computes random UUID version 4 values from a random source
+// reader.
+type UUID struct {
+ randSrc io.Reader
+}
+
+// NewUUID returns an initialized UUID value that can be used to retrieve
+// random UUID version 4 values.
+func NewUUID(r io.Reader) *UUID {
+ return &UUID{randSrc: r}
+}
+
+// GetUUID returns a random UUID version 4 string representation sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetUUID() (string, error) {
+ var b [16]byte
+ if _, err := io.ReadFull(r.randSrc, b[:]); err != nil {
+ return "", err
+ }
+ r.makeUUIDv4(b[:])
+ return format(b), nil
+}
+
+// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetBytes() (u []byte, err error) {
+ u = make([]byte, 16)
+ if _, err = io.ReadFull(r.randSrc, u); err != nil {
+ return u, err
+ }
+ r.makeUUIDv4(u)
+ return u, nil
+}
+
+func (r *UUID) makeUUIDv4(u []byte) {
+ // 13th character is "4"
+ u[6] = (u[6] & 0x0f) | 0x40 // Version 4
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0
+}
+
+// format returns the canonical text representation of a UUID.
+// This implementation is optimized to not use fmt.
+// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
+func format(u [16]byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+ var scratch [36]byte
+
+ hex.Encode(scratch[:8], u[0:4])
+ scratch[8] = dash
+ hex.Encode(scratch[9:13], u[4:6])
+ scratch[13] = dash
+ hex.Encode(scratch[14:18], u[6:8])
+ scratch[18] = dash
+ hex.Encode(scratch[19:23], u[8:10])
+ scratch[23] = dash
+ hex.Encode(scratch[24:], u[10:])
+
+ return string(scratch[:])
+}
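+
+// Example (illustrative sketch, not upstream smithy-go code): producing a
+// version 4 UUID string from the package's default crypto source.
+//
+//	u := NewUUID(Reader)
+//	id, err := u.GetUUID() // e.g. "82e42f16-b6cc-4d5b-95f5-d403c4befd3d"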
diff --git a/vendor/github.com/aws/smithy-go/sync/error.go b/vendor/github.com/aws/smithy-go/sync/error.go
new file mode 100644
index 000000000..629207672
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/sync/error.go
@@ -0,0 +1,53 @@
+package sync
+
+import "sync"
+
+// OnceErr wraps the behavior of recording an error once and signaling on a
+// channel when this has occurred. Signaling is done by closing the channel.
+//
+// Type is safe for concurrent usage.
+type OnceErr struct {
+ mu sync.RWMutex
+ err error
+ ch chan struct{}
+}
+
+// NewOnceErr returns a new OnceErr.
+func NewOnceErr() *OnceErr {
+ return &OnceErr{
+ ch: make(chan struct{}, 1),
+ }
+}
+
+// Err acquires a read-lock and returns an
+// error if one has been set.
+func (e *OnceErr) Err() error {
+ e.mu.RLock()
+ err := e.err
+ e.mu.RUnlock()
+
+ return err
+}
+
+// SetError acquires a write-lock and will set
+// the underlying error value if one has not been set.
+func (e *OnceErr) SetError(err error) {
+ if err == nil {
+ return
+ }
+
+ e.mu.Lock()
+ if e.err == nil {
+ e.err = err
+ close(e.ch)
+ }
+ e.mu.Unlock()
+}
+
+// ErrorSet returns a channel that will be used to signal
+// that an error has been set. This channel will be closed
+// when the error value has been set for OnceErr.
+func (e *OnceErr) ErrorSet() <-chan struct{} {
+ return e.ch
+}
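+
+// Example (illustrative sketch, not upstream smithy-go code): fan-out workers
+// record only the first failure, and a consumer selects on ErrorSet to stop
+// early. doWork and done are hypothetical stand-ins.
+//
+//	once := NewOnceErr()
+//	go func() { once.SetError(doWork()) }()
+//	select {
+//	case <-once.ErrorSet():
+//		return once.Err() // the first recorded error
+//	case <-done:
+//	}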
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 000000000..b552a09f8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,134 @@
+package time
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "strings"
+ "time"
+)
+
+const (
+	// dateTimeFormat* constants cover the RFC 3339 section 5.6 date-time format.
+ dateTimeFormatInput = "2006-01-02T15:04:05.999999999Z"
+ dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+ dateTimeFormatOutput = "2006-01-02T15:04:05.999Z"
+
+ // httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1
+ // IMF-fixdate with no UTC offset.
+ httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+ // Additional formats needed for compatibility.
+ httpDateFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
+ httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time (RFC 3339 section 5.6).
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+ return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time (RFC 3339 section 5.6).
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+ return tryParse(value,
+ dateTimeFormatInput,
+ dateTimeFormatInputNoZ,
+ time.RFC3339Nano,
+ time.RFC3339,
+ )
+}
+
+// FormatHTTPDate formats value as an http-date (RFC 7231#section-7.1.1.1 IMF-fixdate).
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+ return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as an http-date (RFC 7231#section-7.1.1.1 IMF-fixdate).
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+ return tryParse(value,
+ httpDateFormat,
+ httpDateFormatSingleDigitDay,
+ httpDateFormatSingleDigitDayTwoDigitYear,
+ time.RFC850,
+ time.ANSIC,
+ )
+}
+
+// FormatEpochSeconds returns the value as a Unix time in seconds with decimal precision.
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+ ms := value.UnixNano() / int64(time.Millisecond)
+ return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds converts a Unix timestamp in seconds with decimal precision to a time.Time.
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+ f := big.NewFloat(value)
+ f = f.Mul(f, millisecondFloat)
+ i, _ := f.Int64()
+	// Offset to UTC because time.Unix returns the time value based on the
+	// system's local time setting.
+ return time.Unix(0, i*1e6).UTC()
+}
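+
+// Example (illustrative sketch, not upstream smithy-go code): epoch-seconds
+// values round-trip at millisecond precision.
+//
+//	t := time.Unix(1515531081, 123_000_000).UTC()
+//	f := FormatEpochSeconds(t) // 1515531081.123
+//	t2 := ParseEpochSeconds(f) // t2.Equal(t) at millisecond precision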
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+ var errs parseErrors
+ for _, f := range formats {
+ t, err := time.Parse(f, v)
+ if err != nil {
+ errs = append(errs, parseError{
+ Format: f,
+ Err: err,
+ })
+ continue
+ }
+ return t, nil
+ }
+
+ return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+ var s strings.Builder
+ for _, e := range es {
+ fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+ }
+
+ return "parse errors:" + s.String()
+}
+
+type parseError struct {
+ Format string
+ Err error
+}
+
+// SleepWithContext waits for the timer duration to expire or for the context
+// to be canceled, whichever happens first. If the context is canceled, the
+// context's error is returned.
+func SleepWithContext(ctx context.Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
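+
+// Example (illustrative sketch, not upstream smithy-go code): a cancelable
+// backoff pause.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	if err := SleepWithContext(ctx, 5*time.Second); err != nil {
+//		// the context deadline expired before the sleep finished
+//	}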
diff --git a/vendor/github.com/aws/smithy-go/tracing/context.go b/vendor/github.com/aws/smithy-go/tracing/context.go
new file mode 100644
index 000000000..a404ed9d3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/tracing/context.go
@@ -0,0 +1,96 @@
+package tracing
+
+import "context"
+
+type (
+ operationTracerKey struct{}
+ spanLineageKey struct{}
+)
+
+// GetSpan returns the active trace Span on the context.
+//
+// The boolean in the return indicates whether a Span was actually in the
+// context, but a no-op implementation will be returned if not, so callers
+// can generally disregard the boolean unless they wish to explicitly confirm
+// presence/absence of a Span.
+func GetSpan(ctx context.Context) (Span, bool) {
+ lineage := getLineage(ctx)
+ if len(lineage) == 0 {
+ return nopSpan{}, false
+ }
+
+ return lineage[len(lineage)-1], true
+}
+
+// WithSpan sets the active trace Span on the context.
+func WithSpan(parent context.Context, span Span) context.Context {
+ lineage := getLineage(parent)
+ if len(lineage) == 0 {
+ return context.WithValue(parent, spanLineageKey{}, []Span{span})
+ }
+
+ lineage = append(lineage, span)
+ return context.WithValue(parent, spanLineageKey{}, lineage)
+}
+
+// PopSpan pops the current Span off the context, setting the active Span on
+// the returned Context back to its parent and returning the popped Span.
+//
+// PopSpan on a context with no active Span will return a no-op instance.
+//
+// This is mostly necessary for the runtime to manage base trace spans due to
+// the wrapped-function nature of the middleware stack. End-users of Smithy
+// clients SHOULD NOT generally be using this API.
+func PopSpan(parent context.Context) (context.Context, Span) {
+ lineage := getLineage(parent)
+ if len(lineage) == 0 {
+ return parent, nopSpan{}
+ }
+
+ span := lineage[len(lineage)-1]
+ lineage = lineage[:len(lineage)-1]
+ return context.WithValue(parent, spanLineageKey{}, lineage), span
+}
+
+func getLineage(ctx context.Context) []Span {
+ v := ctx.Value(spanLineageKey{})
+ if v == nil {
+ return nil
+ }
+
+ return v.([]Span)
+}
+
+// GetOperationTracer returns the embedded operation-scoped Tracer on a
+// Context.
+//
+// The boolean in the return indicates whether a Tracer was actually in the
+// context, but a no-op implementation will be returned if not, so callers
+// can generally disregard the boolean unless they wish to explicitly confirm
+// presence/absence of a Tracer.
+func GetOperationTracer(ctx context.Context) (Tracer, bool) {
+ v := ctx.Value(operationTracerKey{})
+ if v == nil {
+ return nopTracer{}, false
+ }
+
+ return v.(Tracer), true
+}
+
+// WithOperationTracer returns a child Context embedding the given Tracer.
+//
+// The runtime will use this to embed a scoped tracer for client operations;
+// Smithy/SDK client callers DO NOT need to do this explicitly.
+func WithOperationTracer(parent context.Context, tracer Tracer) context.Context {
+ return context.WithValue(parent, operationTracerKey{}, tracer)
+}
+
+// StartSpan is a convenience API for creating tracing Spans from a Context.
+//
+// StartSpan uses the operation-scoped Tracer, previously stored using
+// [WithOperationTracer], to start the Span. If a Tracer has not been embedded
+// the returned Span will be a no-op implementation.
+func StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+ tracer, _ := GetOperationTracer(ctx)
+ return tracer.StartSpan(ctx, name, opts...)
+}
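+
+// Example (illustrative sketch, not upstream smithy-go code): once a Tracer is
+// embedded with WithOperationTracer, nested units of work open child spans
+// through the context; the no-op tracer is used here for brevity.
+//
+//	ctx = WithOperationTracer(ctx, NopTracerProvider{}.Tracer("example"))
+//	ctx, span := StartSpan(ctx, "LoadCredentials")
+//	defer span.End()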
diff --git a/vendor/github.com/aws/smithy-go/tracing/nop.go b/vendor/github.com/aws/smithy-go/tracing/nop.go
new file mode 100644
index 000000000..573d28b1c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/tracing/nop.go
@@ -0,0 +1,32 @@
+package tracing
+
+import "context"
+
+// NopTracerProvider is a no-op tracing implementation.
+type NopTracerProvider struct{}
+
+var _ TracerProvider = (*NopTracerProvider)(nil)
+
+// Tracer returns a tracer which creates no-op spans.
+func (NopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+ return nopTracer{}
+}
+
+type nopTracer struct{}
+
+var _ Tracer = (*nopTracer)(nil)
+
+func (nopTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+ return ctx, nopSpan{}
+}
+
+type nopSpan struct{}
+
+var _ Span = (*nopSpan)(nil)
+
+func (nopSpan) Name() string { return "" }
+func (nopSpan) Context() SpanContext { return SpanContext{} }
+func (nopSpan) AddEvent(string, ...EventOption) {}
+func (nopSpan) SetProperty(any, any) {}
+func (nopSpan) SetStatus(SpanStatus) {}
+func (nopSpan) End() {}
diff --git a/vendor/github.com/aws/smithy-go/tracing/tracing.go b/vendor/github.com/aws/smithy-go/tracing/tracing.go
new file mode 100644
index 000000000..089ed3932
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/tracing/tracing.go
@@ -0,0 +1,95 @@
+// Package tracing defines tracing APIs to be used by Smithy clients.
+package tracing
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go"
+)
+
+// SpanStatus records the "success" state of an observed span.
+type SpanStatus int
+
+// Enumeration of SpanStatus.
+const (
+ SpanStatusUnset SpanStatus = iota
+ SpanStatusOK
+ SpanStatusError
+)
+
+// SpanKind indicates the nature of the work being performed.
+type SpanKind int
+
+// Enumeration of SpanKind.
+const (
+ SpanKindInternal SpanKind = iota
+ SpanKindClient
+ SpanKindServer
+ SpanKindProducer
+ SpanKindConsumer
+)
+
+// TracerProvider is the entry point for creating client traces.
+type TracerProvider interface {
+ Tracer(scope string, opts ...TracerOption) Tracer
+}
+
+// TracerOption applies configuration to a tracer.
+type TracerOption func(o *TracerOptions)
+
+// TracerOptions represent configuration for tracers.
+type TracerOptions struct {
+ Properties smithy.Properties
+}
+
+// Tracer is the entry point for creating observed client Spans.
+//
+// Spans created by tracers propagate by existing on the Context. Consumers of
+// the API can use [GetSpan] to pull the active Span from a Context.
+//
+// Creation of child Spans is implicit through Context persistence. If
+// StartSpan is called with a Context that holds a Span, the result will be a
+// child of that Span.
+type Tracer interface {
+ StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span)
+}
+
+// SpanOption applies configuration to a span.
+type SpanOption func(o *SpanOptions)
+
+// SpanOptions represent configuration for span events.
+type SpanOptions struct {
+ Kind SpanKind
+ Properties smithy.Properties
+}
+
+// Span records a conceptually individual unit of work that takes place in a
+// Smithy client operation.
+type Span interface {
+ Name() string
+ Context() SpanContext
+ AddEvent(name string, opts ...EventOption)
+ SetStatus(status SpanStatus)
+ SetProperty(k, v any)
+ End()
+}
+
+// EventOption applies configuration to a span event.
+type EventOption func(o *EventOptions)
+
+// EventOptions represent configuration for span events.
+type EventOptions struct {
+ Properties smithy.Properties
+}
+
+// SpanContext uniquely identifies a Span.
+type SpanContext struct {
+ TraceID string
+ SpanID string
+ IsRemote bool
+}
+
+// IsValid is true when a span has nonzero trace and span IDs.
+func (ctx *SpanContext) IsValid() bool {
+ return len(ctx.TraceID) != 0 && len(ctx.SpanID) != 0
+}
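+
+// Example (illustrative sketch, not upstream smithy-go code): consumers pull
+// the active Span off the context to annotate it; if no tracer is installed,
+// a no-op Span is returned and the calls are harmless.
+//
+//	span, _ := GetSpan(ctx)
+//	span.AddEvent("cache miss")
+//	span.SetStatus(SpanStatusOK)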
diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go
new file mode 100644
index 000000000..58e1ab5ef
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/auth.go
@@ -0,0 +1,21 @@
+package http
+
+import (
+ "context"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// AuthScheme defines an HTTP authentication scheme.
+type AuthScheme interface {
+ SchemeID() string
+ IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver
+ Signer() Signer
+}
+
+// Signer defines the interface through which HTTP requests are supplemented
+// with an Identity.
+type Signer interface {
+ SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error
+}
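+
+// Example (illustrative sketch, not upstream smithy-go code): a hypothetical
+// Signer that attaches a static bearer token. A real scheme would resolve the
+// token through an auth.IdentityResolver rather than hard-coding it.
+//
+//	type bearerSigner struct{ token string }
+//
+//	func (s *bearerSigner) SignRequest(ctx context.Context, r *Request, _ auth.Identity, _ smithy.Properties) error {
+//		r.Header.Set("Authorization", "Bearer "+s.token)
+//		return nil
+//	}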
diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go
new file mode 100644
index 000000000..d60cf2a60
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go
@@ -0,0 +1,45 @@
+package http
+
+import (
+ "context"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// NewAnonymousScheme returns the anonymous HTTP auth scheme.
+func NewAnonymousScheme() AuthScheme {
+ return &authScheme{
+ schemeID: auth.SchemeIDAnonymous,
+ signer: &nopSigner{},
+ }
+}
+
+// authScheme is parameterized to generically implement the exported AuthScheme
+// interface.
+type authScheme struct {
+ schemeID string
+ signer Signer
+}
+
+var _ AuthScheme = (*authScheme)(nil)
+
+func (s *authScheme) SchemeID() string {
+ return s.schemeID
+}
+
+func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver {
+ return o.GetIdentityResolver(s.schemeID)
+}
+
+func (s *authScheme) Signer() Signer {
+ return s.signer
+}
+
+type nopSigner struct{}
+
+var _ Signer = (*nopSigner)(nil)
+
+func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error {
+ return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
new file mode 100644
index 000000000..bc4ad6e79
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
@@ -0,0 +1,70 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// contentMD5Checksum provides a middleware to compute and set the
+// Content-MD5 checksum for an HTTP request.
+type contentMD5Checksum struct {
+}
+
+// AddContentChecksumMiddleware adds checksum middleware to middleware's
+// build step.
+func AddContentChecksumMiddleware(stack *middleware.Stack) error {
+ // This middleware must be executed before request body is set.
+ return stack.Build.Add(&contentMD5Checksum{}, middleware.Before)
+}
+
+// ID returns the identifier for the checksum middleware
+func (m *contentMD5Checksum) ID() string { return "ContentChecksum" }
+
+// HandleBuild computes the MD5 checksum of the request payload and sets the
+// Content-MD5 header on the HTTP request.
+func (m *contentMD5Checksum) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ // if Content-MD5 header is already present, return
+ if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+ return next.HandleBuild(ctx, in)
+ }
+
+ // fetch the request stream.
+ stream := req.GetStream()
+ // compute checksum if payload is explicit
+ if stream != nil {
+ if !req.IsStreamSeekable() {
+ return out, metadata, fmt.Errorf(
+ "unseekable stream is not supported for computing md5 checksum")
+ }
+
+ v, err := computeMD5Checksum(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err)
+ }
+
+ // reset the request stream
+ if err := req.RewindStream(); err != nil {
+ return out, metadata, fmt.Errorf(
+ "error rewinding request stream after computing md5 checksum, %w", err)
+ }
+
+ // set the 'Content-MD5' header
+ req.Header.Set(contentMD5Header, string(v))
+ }
+
+	// continue to the next handler in the stack
+ return next.HandleBuild(ctx, in)
+}
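+
+// Example (illustrative sketch, not upstream smithy-go code): wiring the
+// checksum middleware into a stack; middleware.NewStack and NewStackRequest
+// are the stack constructors from this module.
+//
+//	stack := middleware.NewStack("example", NewStackRequest)
+//	if err := AddContentChecksumMiddleware(stack); err != nil {
+//		// stack wiring failed
+//	}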
diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go
new file mode 100644
index 000000000..0fceae81d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/client.go
@@ -0,0 +1,161 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+)
+
+// ClientDo provides the interface for custom HTTP client implementations.
+type ClientDo interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// ClientDoFunc provides a helper to wrap a function as an HTTP client for
+// round tripping requests.
+type ClientDoFunc func(*http.Request) (*http.Response, error)
+
+// Do will invoke the underlying func, returning the result.
+func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
+ return fn(r)
+}
+
+// ClientHandler wraps a client that implements the HTTP Do method. Standard
+// implementation is http.Client.
+type ClientHandler struct {
+ client ClientDo
+
+ Meter metrics.Meter // For HTTP client metrics.
+}
+
+// NewClientHandler returns an initialized middleware handler for the client.
+//
+// Deprecated: Use [NewClientHandlerWithOptions].
+func NewClientHandler(client ClientDo) ClientHandler {
+ return NewClientHandlerWithOptions(client)
+}
+
+// NewClientHandlerWithOptions returns an initialized middleware handler for the client
+// with applied options.
+func NewClientHandlerWithOptions(client ClientDo, opts ...func(*ClientHandler)) ClientHandler {
+ h := ClientHandler{
+ client: client,
+ }
+ for _, opt := range opts {
+ opt(&h)
+ }
+ if h.Meter == nil {
+ h.Meter = metrics.NopMeterProvider{}.Meter("")
+ }
+ return h
+}
+
+// Handle implements the middleware Handler interface, invoking the underlying
+// HTTP client. Requires the input to be a Smithy *Request. Returns a Smithy
+// *Response, or an error if the request failed.
+func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
+ out interface{}, metadata middleware.Metadata, err error,
+) {
+ ctx, span := tracing.StartSpan(ctx, "DoHTTPRequest")
+ defer span.End()
+
+ ctx, client, err := withMetrics(ctx, c.client, c.Meter)
+ if err != nil {
+ return nil, metadata, fmt.Errorf("instrument with HTTP metrics: %w", err)
+ }
+
+ req, ok := input.(*Request)
+ if !ok {
+ return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
+ }
+
+ builtRequest := req.Build(ctx)
+ if err := ValidateEndpointHost(builtRequest.Host); err != nil {
+ return nil, metadata, err
+ }
+
+ span.SetProperty("http.method", req.Method)
+ span.SetProperty("http.request_content_length", -1) // at least indicate unknown
+ length, ok, err := req.StreamLength()
+ if err != nil {
+ return nil, metadata, err
+ }
+ if ok {
+ span.SetProperty("http.request_content_length", length)
+ }
+
+ resp, err := client.Do(builtRequest)
+ if resp == nil {
+ // Ensure a http response value is always present to prevent unexpected
+ // panics.
+ resp = &http.Response{
+ Header: http.Header{},
+ Body: http.NoBody,
+ }
+ }
+ if err != nil {
+ err = &RequestSendError{Err: err}
+
+		// Override the error with a context canceled error, if the context was canceled.
+ select {
+ case <-ctx.Done():
+ err = &smithy.CanceledError{Err: ctx.Err()}
+ default:
+ }
+ }
+
+ // HTTP RoundTripper *should* close the request body. But this may not happen in a timely manner.
+ // So instead Smithy *Request Build wraps the body to be sent in a safe closer that will clear the
+ // stream reference so that it can be safely reused.
+ if builtRequest.Body != nil {
+ _ = builtRequest.Body.Close()
+ }
+
+ span.SetProperty("net.protocol.version", fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor))
+ span.SetProperty("http.status_code", resp.StatusCode)
+ span.SetProperty("http.response_content_length", resp.ContentLength)
+
+ return &Response{Response: resp}, metadata, err
+}
+
+// RequestSendError provides a generic request transport error. This error
+// should wrap errors making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails, and did not fail because of context canceled.
+type RequestSendError struct {
+ Err error
+}
+
+// ConnectionError returns that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+ return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+ return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+ return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+ return &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: http.NoBody,
+ }, nil
+}
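+
+// Example (illustrative sketch, not upstream smithy-go code): any round-trip
+// function can be adapted into a handler; NopClient stands in for a real
+// *http.Client, and ctx/req are assumed to be in scope.
+//
+//	h := NewClientHandlerWithOptions(ClientDoFunc(NopClient{}.Do))
+//	out, _, err := h.Handle(ctx, req) // req must be a Smithy *Request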
diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go
new file mode 100644
index 000000000..07366ac85
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with a service.
+*/
+package http
diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
new file mode 100644
index 000000000..cbc9deb4d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
@@ -0,0 +1,163 @@
+package http
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) {
+ values := make([]string, 0, len(vs))
+
+ for i := 0; i < len(vs); i++ {
+ parts, err := splitFn(vs[i])
+ if err != nil {
+ return nil, err
+ }
+ values = append(values, parts...)
+ }
+
+ return values, nil
+}
+
+// SplitHeaderListValues attempts to split the elements of the slice by commas,
+// and return a list of all values separated. Returns error if unable to
+// separate the values.
+func SplitHeaderListValues(vs []string) ([]string, error) {
+ return splitHeaderListValues(vs, quotedCommaSplit)
+}
+
+func quotedCommaSplit(v string) (parts []string, err error) {
+ v = strings.TrimSpace(v)
+
+ expectMore := true
+ for i := 0; i < len(v); i++ {
+ if unicode.IsSpace(rune(v[i])) {
+ continue
+ }
+ expectMore = false
+
+ // leading space in part is ignored.
+ // Start of value must be non-space, or quote.
+ //
+ // - If quote, enter quoted mode, find next non-escaped quote to
+ // terminate the value.
+ // - Otherwise, find next comma to terminate value.
+
+ remaining := v[i:]
+
+ var value string
+ var valueLen int
+ if remaining[0] == '"' {
+ //------------------------------
+ // Quoted value
+ //------------------------------
+ var j int
+ var skipQuote bool
+ for j += 1; j < len(remaining); j++ {
+ if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) {
+ skipQuote = !skipQuote
+ continue
+ }
+ if remaining[j] == '"' {
+ break
+ }
+ }
+ if j == len(remaining) || j == 1 {
+ return nil, fmt.Errorf("value %v missing closing double quote",
+ remaining)
+ }
+ valueLen = j + 1
+
+ tail := remaining[valueLen:]
+ var k int
+ for ; k < len(tail); k++ {
+ if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' {
+ return nil, fmt.Errorf("value %v has non-space trailing characters",
+ remaining)
+ }
+ if tail[k] == ',' {
+ expectMore = true
+ break
+ }
+ }
+ value = remaining[:valueLen]
+ value, err = strconv.Unquote(value)
+ if err != nil {
+ return nil, fmt.Errorf("failed to unquote value %v, %w", value, err)
+ }
+
+ // Pad valueLen to include trailing space(s) so `i` is updated correctly.
+ valueLen += k
+
+ } else {
+ //------------------------------
+ // Unquoted value
+ //------------------------------
+
+ // Index of the next comma is the length of the value, or end of string.
+ valueLen = strings.Index(remaining, ",")
+ if valueLen != -1 {
+ expectMore = true
+ } else {
+ valueLen = len(remaining)
+ }
+ value = strings.TrimSpace(remaining[:valueLen])
+ }
+
+ i += valueLen
+ parts = append(parts, value)
+
+ }
+
+ if expectMore {
+ parts = append(parts, "")
+ }
+
+ return parts, nil
+}
+
+// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date
+// timestamp values in the slice by commas, and return a list of all values
+// separated. The split is aware of the HTTP-Date timestamp format, and will skip
+// the comma within the timestamp value. Returns an error if unable to split the
+// timestamp values.
+func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) {
+ return splitHeaderListValues(vs, splitHTTPDateHeaderValue)
+}
+
+func splitHTTPDateHeaderValue(v string) ([]string, error) {
+ if n := strings.Count(v, ","); n <= 1 {
+		// Nothing to do if the value contains no comma, or only a single HTTPDate value.
+ return []string{v}, nil
+ } else if n%2 == 0 {
+ return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v)
+ }
+
+ var parts []string
+ var i, j int
+
+ var doSplit bool
+ for ; i < len(v); i++ {
+ if v[i] == ',' {
+ if doSplit {
+ doSplit = false
+ parts = append(parts, strings.TrimSpace(v[j:i]))
+ j = i + 1
+ } else {
+ // Skip the first comma in the timestamp value since that
+ // separates the day from the rest of the timestamp.
+ //
+ // Tue, 17 Dec 2019 23:48:18 GMT
+ doSplit = true
+ }
+ }
+ }
+ // Add final part
+ if j < len(v) {
+ parts = append(parts, strings.TrimSpace(v[j:]))
+ }
+
+ return parts, nil
+}
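+
+// Example (illustrative sketch, not upstream smithy-go code): quoted elements
+// survive the comma split, and HTTP-Date values keep their day-name comma.
+//
+//	SplitHeaderListValues([]string{`a, "b, c"`})
+//	// -> ["a", "b, c"]
+//	SplitHTTPDateTimestampHeaderListValues([]string{"Mon, 16 Dec 2019 23:48:18 GMT, Tue, 17 Dec 2019 23:48:18 GMT"})
+//	// -> ["Mon, 16 Dec 2019 23:48:18 GMT", "Tue, 17 Dec 2019 23:48:18 GMT"]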
diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go
new file mode 100644
index 000000000..db9801bea
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/host.go
@@ -0,0 +1,89 @@
+package http
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(host string) error {
+ var errors strings.Builder
+ var hostname string
+ var port string
+ var err error
+
+ if strings.Contains(host, ":") {
+ hostname, port, err = net.SplitHostPort(host)
+ if err != nil {
+ errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+ errors.WriteString(err.Error())
+ }
+
+ if !ValidPortNumber(port) {
+ errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+ }
+ } else {
+ hostname = host
+ }
+
+ labels := strings.Split(hostname, ".")
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+ errors.WriteString(label)
+ }
+ }
+
+ if len(hostname) == 0 && len(port) != 0 {
+ errors.WriteString("\nendpoint host with port must not be empty")
+ }
+
+ if len(hostname) > 255 {
+ errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+ }
+
+ if len(errors.String()) > 0 {
+ return fmt.Errorf("invalid endpoint host%s", errors.String())
+ }
+ return nil
+}
+
+// ValidPortNumber returns whether the port is valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+ i, err := strconv.Atoi(port)
+ if err != nil {
+ return false
+ }
+
+ if i < 0 || i > 65535 {
+ return false
+ }
+ return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 {
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
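+
+// Example (illustrative sketch, not upstream smithy-go code): validation
+// accepts hostnames with an optional port and rejects malformed labels.
+//
+//	ValidateEndpointHost("s3.us-west-2.amazonaws.com") // nil
+//	ValidateEndpointHost("example.com:8443")           // nil
+//	ValidateEndpointHost("bad_label.example.com")      // error: '_' is not a valid label character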
diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go
new file mode 100644
index 000000000..e21f2632a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go
@@ -0,0 +1,321 @@
+package http
+
+import (
+ "context"
+)
+
+func icopy[T any](v []T) []T {
+ s := make([]T, len(v))
+ copy(s, v)
+ return s
+}
+
+// InterceptorContext is all the information available in different
+// interceptors.
+//
+// Not all information is available in each interceptor, see each interface
+// definition for more details.
+type InterceptorContext struct {
+ Input any
+ Request *Request
+
+ Output any
+ Response *Response
+}
+
+// InterceptorRegistry holds a list of operation interceptors.
+//
+// Interceptors allow callers to insert custom behavior at well-defined points
+// within a client's operation lifecycle.
+//
+// # Interceptor context
+//
+// All interceptors are invoked with a context object that contains input and
+// output containers for the operation. The individual fields that are
+// available will depend on what the interceptor is and, in certain
+// interceptors, how far the operation was able to progress. See the
+// documentation for each interface definition for more information about field
+// availability.
+//
+// Implementations MUST NOT directly mutate the values of the fields in the
+// interceptor context. They are free to mutate the existing values _pointed
+// to_ by those fields, however.
+//
+// # Returning errors
+//
+// All interceptors can return errors. If an interceptor returns an error
+// _before_ the client's retry loop, the operation will fail immediately. If
+// one returns an error _within_ the retry loop, the error WILL be considered
+// according to the client's retry policy.
+//
+// # Adding interceptors
+//
+// Idiomatically you will simply use one of the Add() receiver methods to
+// register interceptors as desired. However, the list for each interface is
+// exported on the registry struct and the caller is free to manipulate it
+// directly, for example, to register a number of interceptors all at once, or
+// to remove one that was previously registered.
+//
+// The base SDK client WILL NOT add any interceptors. SDK operations and
+// customizations are implemented in terms of middleware.
+//
+// Modifications to the registry will not persist across operation calls when
+// using per-operation functional options. This means you can register
+// interceptors on a per-operation basis without affecting other operations.
+type InterceptorRegistry struct {
+ BeforeExecution []BeforeExecutionInterceptor
+ BeforeSerialization []BeforeSerializationInterceptor
+ AfterSerialization []AfterSerializationInterceptor
+ BeforeRetryLoop []BeforeRetryLoopInterceptor
+ BeforeAttempt []BeforeAttemptInterceptor
+ BeforeSigning []BeforeSigningInterceptor
+ AfterSigning []AfterSigningInterceptor
+ BeforeTransmit []BeforeTransmitInterceptor
+ AfterTransmit []AfterTransmitInterceptor
+ BeforeDeserialization []BeforeDeserializationInterceptor
+ AfterDeserialization []AfterDeserializationInterceptor
+ AfterAttempt []AfterAttemptInterceptor
+ AfterExecution []AfterExecutionInterceptor
+}
+
+// Copy returns a deep copy of the registry. This is used by SDK clients on
+// each operation call in order to prevent per-op config mutation from
+// persisting.
+func (i *InterceptorRegistry) Copy() InterceptorRegistry {
+ return InterceptorRegistry{
+ BeforeExecution: icopy(i.BeforeExecution),
+ BeforeSerialization: icopy(i.BeforeSerialization),
+ AfterSerialization: icopy(i.AfterSerialization),
+ BeforeRetryLoop: icopy(i.BeforeRetryLoop),
+ BeforeAttempt: icopy(i.BeforeAttempt),
+ BeforeSigning: icopy(i.BeforeSigning),
+ AfterSigning: icopy(i.AfterSigning),
+ BeforeTransmit: icopy(i.BeforeTransmit),
+ AfterTransmit: icopy(i.AfterTransmit),
+ BeforeDeserialization: icopy(i.BeforeDeserialization),
+ AfterDeserialization: icopy(i.AfterDeserialization),
+ AfterAttempt: icopy(i.AfterAttempt),
+ AfterExecution: icopy(i.AfterExecution),
+ }
+}
+
+// AddBeforeExecution registers the provided BeforeExecutionInterceptor.
+func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) {
+ i.BeforeExecution = append(i.BeforeExecution, v)
+}
+
+// AddBeforeSerialization registers the provided BeforeSerializationInterceptor.
+func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) {
+ i.BeforeSerialization = append(i.BeforeSerialization, v)
+}
+
+// AddAfterSerialization registers the provided AfterSerializationInterceptor.
+func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) {
+ i.AfterSerialization = append(i.AfterSerialization, v)
+}
+
+// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor.
+func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) {
+ i.BeforeRetryLoop = append(i.BeforeRetryLoop, v)
+}
+
+// AddBeforeAttempt registers the provided BeforeAttemptInterceptor.
+func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) {
+ i.BeforeAttempt = append(i.BeforeAttempt, v)
+}
+
+// AddBeforeSigning registers the provided BeforeSigningInterceptor.
+func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) {
+ i.BeforeSigning = append(i.BeforeSigning, v)
+}
+
+// AddAfterSigning registers the provided AfterSigningInterceptor.
+func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) {
+ i.AfterSigning = append(i.AfterSigning, v)
+}
+
+// AddBeforeTransmit registers the provided BeforeTransmitInterceptor.
+func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) {
+ i.BeforeTransmit = append(i.BeforeTransmit, v)
+}
+
+// AddAfterTransmit registers the provided AfterTransmitInterceptor.
+func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) {
+ i.AfterTransmit = append(i.AfterTransmit, v)
+}
+
+// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor.
+func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) {
+ i.BeforeDeserialization = append(i.BeforeDeserialization, v)
+}
+
+// AddAfterDeserialization registers the provided AfterDeserializationInterceptor.
+func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) {
+ i.AfterDeserialization = append(i.AfterDeserialization, v)
+}
+
+// AddAfterAttempt registers the provided AfterAttemptInterceptor.
+func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) {
+ i.AfterAttempt = append(i.AfterAttempt, v)
+}
+
+// AddAfterExecution registers the provided AfterExecutionInterceptor.
+func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) {
+ i.AfterExecution = append(i.AfterExecution, v)
+}
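+
+// Illustrative usage sketch (loggingInterceptor is a hypothetical type, not
+// part of this package): implement the hook interface you need, then register
+// the value on a registry.
+//
+//	type loggingInterceptor struct{ log func(string) }
+//
+//	func (l *loggingInterceptor) BeforeExecution(ctx context.Context, in *InterceptorContext) error {
+//		l.log("starting operation")
+//		return nil
+//	}
+//
+//	var registry InterceptorRegistry
+//	registry.AddBeforeExecution(&loggingInterceptor{log: func(s string) { fmt.Println(s) }})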
+
+// BeforeExecutionInterceptor runs before anything else in the operation
+// lifecycle.
+//
+// Available InterceptorContext fields:
+// - Input
+type BeforeExecutionInterceptor interface {
+ BeforeExecution(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeSerializationInterceptor runs before the operation input is serialized
+// into its transport request.
+//
+// Serialization occurs before the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+type BeforeSerializationInterceptor interface {
+ BeforeSerialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterSerializationInterceptor runs after the operation input is serialized
+// into its transport request.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type AfterSerializationInterceptor interface {
+ AfterSerialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeRetryLoopInterceptor interface {
+ BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeAttemptInterceptor runs right before every attempt in the retry loop.
+//
+// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be
+// invoked.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeAttemptInterceptor interface {
+ BeforeAttempt(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeSigningInterceptor runs right before the request is signed.
+//
+// Signing occurs within the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeSigningInterceptor interface {
+ BeforeSigning(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterSigningInterceptor runs right after the request is signed.
+//
+// It is unsafe to modify the outgoing HTTP request at or past this hook, since
+// doing so may invalidate the signature of the request.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type AfterSigningInterceptor interface {
+ AfterSigning(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeTransmitInterceptor runs right before the HTTP request is sent.
+//
+// HTTP transmit occurs within the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeTransmitInterceptor interface {
+ BeforeTransmit(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterTransmitInterceptor runs right after the HTTP response is received.
+//
+// It will always be invoked when a response is received, regardless of its
+// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was
+// not successful, e.g. because of a DNS resolution error.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+// - Response
+type AfterTransmitInterceptor interface {
+ AfterTransmit(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeDeserializationInterceptor runs right before the incoming HTTP response
+// is deserialized.
+//
+// This interceptor IS NOT invoked if the HTTP round-trip was not successful.
+//
+// Deserialization occurs within the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+// - Response
+type BeforeDeserializationInterceptor interface {
+ BeforeDeserialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterDeserializationInterceptor runs right after the incoming HTTP response
+// is deserialized. This hook is invoked regardless of whether the deserialized
+// result was an error.
+//
+// This interceptor IS NOT invoked if the HTTP round-trip was not successful.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Output (IF the operation had a success-level response)
+// - Request
+// - Response
+type AfterDeserializationInterceptor interface {
+ AfterDeserialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterAttemptInterceptor runs at the end of every attempt in the retry
+// loop. This hook is invoked regardless of whether the deserialized result
+// was an error, or whether another interceptor within the retry loop
+// returned an error.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Output (IF the operation had a success-level response)
+// - Request (IF the operation did not return an error during serialization)
+// - Response (IF the operation was able to transmit the HTTP request)
+type AfterAttemptInterceptor interface {
+ AfterAttempt(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterExecutionInterceptor runs after everything else. It runs regardless of
+// how far the operation progressed in its lifecycle, and regardless of whether
+// the operation succeeded or failed.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Output (IF the operation had a success-level response)
+// - Request (IF the operation did not return an error during serialization)
+// - Response (IF the operation was able to transmit the HTTP request)
+type AfterExecutionInterceptor interface {
+ AfterExecution(ctx context.Context, in *InterceptorContext) error
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go
new file mode 100644
index 000000000..2cc4b57f8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go
@@ -0,0 +1,325 @@
+package http
+
+import (
+ "context"
+ "errors"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type ictxKey struct{}
+
+func withIctx(ctx context.Context) context.Context {
+ return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{})
+}
+
+func getIctx(ctx context.Context) *InterceptorContext {
+ return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext)
+}
+
+// InterceptExecution runs Before/AfterExecutionInterceptors.
+type InterceptExecution struct {
+ BeforeExecution []BeforeExecutionInterceptor
+ AfterExecution []AfterExecutionInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptExecution) ID() string {
+ return "InterceptExecution"
+}
+
+// HandleInitialize runs the interceptors.
+func (m *InterceptExecution) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, md middleware.Metadata, err error,
+) {
+ ctx = withIctx(ctx)
+ getIctx(ctx).Input = in.Parameters
+
+ for _, i := range m.BeforeExecution {
+ if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ out, md, err = next.HandleInitialize(ctx, in)
+
+ for _, i := range m.AfterExecution {
+ if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptBeforeSerialization runs BeforeSerializationInterceptors.
+type InterceptBeforeSerialization struct {
+ Interceptors []BeforeSerializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeSerialization) ID() string {
+ return "InterceptBeforeSerialization"
+}
+
+// HandleSerialize runs the interceptors.
+func (m *InterceptBeforeSerialization) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// InterceptAfterSerialization runs AfterSerializationInterceptors.
+type InterceptAfterSerialization struct {
+ Interceptors []AfterSerializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAfterSerialization) ID() string {
+ return "InterceptAfterSerialization"
+}
+
+// HandleSerialize runs the interceptors.
+func (m *InterceptAfterSerialization) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, md middleware.Metadata, err error,
+) {
+ getIctx(ctx).Request = in.Request.(*Request)
+
+ for _, i := range m.Interceptors {
+ if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors.
+type InterceptBeforeRetryLoop struct {
+ Interceptors []BeforeRetryLoopInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeRetryLoop) ID() string {
+ return "InterceptBeforeRetryLoop"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptBeforeRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// InterceptBeforeSigning runs BeforeSigningInterceptors.
+type InterceptBeforeSigning struct {
+ Interceptors []BeforeSigningInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeSigning) ID() string {
+ return "InterceptBeforeSigning"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptBeforeSigning) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// InterceptAfterSigning runs AfterSigningInterceptors.
+type InterceptAfterSigning struct {
+ Interceptors []AfterSigningInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAfterSigning) ID() string {
+ return "InterceptAfterSigning"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptAfterSigning) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors.
+type InterceptTransmit struct {
+ BeforeTransmit []BeforeTransmitInterceptor
+ AfterTransmit []AfterTransmitInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptTransmit) ID() string {
+ return "InterceptTransmit"
+}
+
+// HandleDeserialize runs the interceptors.
+func (m *InterceptTransmit) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.BeforeTransmit {
+ if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ out, md, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, md, err
+ }
+
+ // the root of the decorated middleware guarantees this will be here
+ // (client.go: ClientHandler.Handle)
+ getIctx(ctx).Response = out.RawResponse.(*Response)
+
+ for _, i := range m.AfterTransmit {
+ if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors.
+type InterceptBeforeDeserialization struct {
+ Interceptors []BeforeDeserializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeDeserialization) ID() string {
+ return "InterceptBeforeDeserialization"
+}
+
+// HandleDeserialize runs the interceptors.
+func (m *InterceptBeforeDeserialization) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, md middleware.Metadata, err error,
+) {
+ out, md, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ var terr *RequestSendError
+ if errors.As(err, &terr) {
+ return out, md, err
+ }
+ }
+
+ for _, i := range m.Interceptors {
+ if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptAfterDeserialization runs AfterDeserializationInterceptors.
+type InterceptAfterDeserialization struct {
+ Interceptors []AfterDeserializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAfterDeserialization) ID() string {
+ return "InterceptAfterDeserialization"
+}
+
+// HandleDeserialize runs the interceptors.
+func (m *InterceptAfterDeserialization) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, md middleware.Metadata, err error,
+) {
+ out, md, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ var terr *RequestSendError
+ if errors.As(err, &terr) {
+ return out, md, err
+ }
+ }
+
+ getIctx(ctx).Output = out.Result
+
+ for _, i := range m.Interceptors {
+ if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptAttempt runs Before/AfterAttemptInterceptors.
+type InterceptAttempt struct {
+ BeforeAttempt []BeforeAttemptInterceptor
+ AfterAttempt []AfterAttemptInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAttempt) ID() string {
+ return "InterceptAttempt"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptAttempt) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.BeforeAttempt {
+ if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ out, md, err = next.HandleFinalize(ctx, in)
+
+ for _, i := range m.AfterAttempt {
+ if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 000000000..941a8d6b5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+ "io"
+ "sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+ sr := &safeReadCloser{
+ readCloser: readCloser,
+ }
+
+ if _, ok := readCloser.(io.WriterTo); ok {
+ return &safeWriteToReadCloser{safeReadCloser: sr}
+ }
+
+ return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+ *safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+ r.safeReadCloser.mtx.Lock()
+ defer r.safeReadCloser.mtx.Unlock()
+
+ if r.safeReadCloser.closed {
+ return 0, io.EOF
+ }
+
+ return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps an io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection. Wrapping in this manner will ensure that no data race conditions are falsely reported.
+// This type is thread-safe.
+type safeReadCloser struct {
+ readCloser io.ReadCloser
+ closed bool
+ mtx sync.Mutex
+}
+
+// Read reads up to len(p) bytes into p from the underlying reader. If the reader is closed, io.EOF will be returned.
+func (r *safeReadCloser) Read(p []byte) (n int, err error) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ if r.closed {
+ return 0, io.EOF
+ }
+
+ return r.readCloser.Read(p)
+}
+
+// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error
+// reported from Close. Subsequent calls to Close will always return a nil error.
+func (r *safeReadCloser) Close() error {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ if r.closed {
+ return nil
+ }
+
+ r.closed = true
+ rc := r.readCloser
+ r.readCloser = nil
+ return rc.Close()
+}
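+
+// Illustrative usage sketch: wrap a request body so that a Close racing with
+// a late read from the net/http transport goroutine is safe.
+//
+//	body := NewSafeReadCloser(io.NopCloser(strings.NewReader("payload")))
+//	_, _ = io.ReadAll(body)
+//	_ = body.Close() // later Reads return io.EOF; repeated Closes return nil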
diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
new file mode 100644
index 000000000..5d6a4b23a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
@@ -0,0 +1,25 @@
+package http
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "fmt"
+ "io"
+)
+
+// computeMD5Checksum computes base64 md5 checksum of an io.Reader's contents.
+// Returns the base64-encoded md5 checksum as a byte slice, or an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+ h := md5.New()
+ // copy errors may be assumed to be from the body.
+ _, err := io.Copy(h, r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read body: %w", err)
+ }
+
+ // encode the md5 checksum in base64.
+ sum := h.Sum(nil)
+ sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+ base64.StdEncoding.Encode(sum64, sum)
+ return sum64, nil
+}
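+
+// Illustrative call:
+//
+//	sum, err := computeMD5Checksum(strings.NewReader("hello"))
+//	// on success, sum holds the base64 encoding of the MD5 digest of "hello"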
diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go
new file mode 100644
index 000000000..b4cd4a47e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go
@@ -0,0 +1,204 @@
+package http
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/smithy-go/metrics"
+)
+
+var now = time.Now
+
+// withMetrics instruments an HTTP client and context to collect HTTP metrics.
+func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) (
+ context.Context, ClientDo, error,
+) {
+ // WithClientTrace is an expensive operation - avoid calling it if we're
+ // not actually using a metrics sink.
+ if _, ok := meter.(metrics.NopMeter); ok {
+ return parent, client, nil
+ }
+
+ hm, err := newHTTPMetrics(meter)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ctx := httptrace.WithClientTrace(parent, &httptrace.ClientTrace{
+ DNSStart: hm.DNSStart,
+ ConnectStart: hm.ConnectStart,
+ TLSHandshakeStart: hm.TLSHandshakeStart,
+
+ GotConn: hm.GotConn(parent),
+ PutIdleConn: hm.PutIdleConn(parent),
+ ConnectDone: hm.ConnectDone(parent),
+ DNSDone: hm.DNSDone(parent),
+ TLSHandshakeDone: hm.TLSHandshakeDone(parent),
+ GotFirstResponseByte: hm.GotFirstResponseByte(parent),
+ })
+ return ctx, &timedClientDo{client, hm}, nil
+}
+
+type timedClientDo struct {
+ ClientDo
+ hm *httpMetrics
+}
+
+func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) {
+ c.hm.doStart.Store(now())
+ resp, err := c.ClientDo.Do(r)
+
+ c.hm.DoRequestDuration.Record(r.Context(), c.hm.doStart.Elapsed())
+ return resp, err
+}
+
+type httpMetrics struct {
+ DNSLookupDuration metrics.Float64Histogram // client.http.connections.dns_lookup_duration
+ ConnectDuration metrics.Float64Histogram // client.http.connections.acquire_duration
+ TLSHandshakeDuration metrics.Float64Histogram // client.http.connections.tls_handshake_duration
+ ConnectionUsage metrics.Int64UpDownCounter // client.http.connections.usage
+
+ DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration
+ TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte
+
+ doStart safeTime
+ dnsStart safeTime
+ connectStart safeTime
+ tlsStart safeTime
+}
+
+func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) {
+ hm := &httpMetrics{}
+
+ var err error
+ hm.DNSLookupDuration, err = meter.Float64Histogram("client.http.connections.dns_lookup_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes a request to perform DNS lookup."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.ConnectDuration, err = meter.Float64Histogram("client.http.connections.acquire_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes a request to acquire a connection."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.TLSHandshakeDuration, err = meter.Float64Histogram("client.http.connections.tls_handshake_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes an HTTP request to perform the TLS handshake."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.ConnectionUsage, err = meter.Int64UpDownCounter("client.http.connections.usage", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "{connection}"
+ o.Description = "Current state of connections pool."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.DoRequestDuration, err = meter.Float64Histogram("client.http.do_request_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "Time spent performing an entire HTTP transaction."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.TimeToFirstByte, err = meter.Float64Histogram("client.http.time_to_first_byte", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "Time from start of transaction to when the first response byte is available."
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return hm, nil
+}
+
+func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) {
+ m.dnsStart.Store(now())
+}
+
+func (m *httpMetrics) ConnectStart(string, string) {
+ m.connectStart.Store(now())
+}
+
+func (m *httpMetrics) TLSHandshakeStart() {
+ m.tlsStart.Store(now())
+}
+
+func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) {
+ return func(httptrace.GotConnInfo) {
+ m.addConnAcquired(ctx, 1)
+ }
+}
+
+func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) {
+ return func(error) {
+ m.addConnAcquired(ctx, -1)
+ }
+}
+
+func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) {
+ return func(httptrace.DNSDoneInfo) {
+ m.DNSLookupDuration.Record(ctx, m.dnsStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) {
+ return func(string, string, error) {
+ m.ConnectDuration.Record(ctx, m.connectStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) {
+ return func(tls.ConnectionState, error) {
+ m.TLSHandshakeDuration.Record(ctx, m.tlsStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() {
+ return func() {
+ m.TimeToFirstByte.Record(ctx, m.doStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) addConnAcquired(ctx context.Context, incr int64) {
+ m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("state", "acquired")
+ })
+}
+
+// Not used: it is recommended to track acquired vs idle conn, but we can't
+// determine when something is truly idle with the current HTTP client hooks
+// available to us.
+func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) {
+ m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("state", "idle")
+ })
+}
+
+type safeTime struct {
+ atomic.Value // time.Time
+}
+
+func (st *safeTime) Store(v time.Time) {
+ st.Value.Store(v)
+}
+
+func (st *safeTime) Load() time.Time {
+ t, _ := st.Value.Load().(time.Time)
+ return t
+}
+
+func (st *safeTime) Elapsed() float64 {
+ end := now()
+ elapsed := end.Sub(st.Load())
+ return float64(elapsed) / 1e9
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
new file mode 100644
index 000000000..914338f2e
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+ "context"
+ "io"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically
+// close the response body of an operation request if the operation returned
+// an error.
+func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before)
+}
+
+type errorCloseResponseBodyMiddleware struct{}
+
+func (*errorCloseResponseBodyMiddleware) ID() string {
+ return "ErrorCloseResponseBody"
+}
+
+func (m *errorCloseResponseBodyMiddleware) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err := next.HandleDeserialize(ctx, input)
+ if err != nil {
+ if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil {
+ // Consume the full body to prevent TCP connection resets on some platforms
+ _, _ = io.Copy(io.Discard, resp.Body)
+ // Do not validate that the response closes successfully.
+ resp.Body.Close()
+ }
+ }
+
+ return out, metadata, err
+}
+
+// AddCloseResponseBodyMiddleware adds the middleware to automatically close
+// the response body of an operation request, after the response has been
+// deserialized.
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+ return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err := next.HandleDeserialize(ctx, input)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ if resp, ok := out.RawResponse.(*Response); ok {
+ // Consume the full body to prevent TCP connection resets on some platforms
+ _, copyErr := io.Copy(io.Discard, resp.Body)
+ if copyErr != nil {
+ middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
+ }
+
+ closeErr := resp.Body.Close()
+ if closeErr != nil {
+ middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse")
+ }
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
new file mode 100644
index 000000000..9969389bb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
@@ -0,0 +1,84 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialized request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+ return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ // do nothing if request content-length was set to 0 or above.
+ if req.ContentLength >= 0 {
+ return next.HandleBuild(ctx, in)
+ }
+
+ // attempt to compute stream length
+ if n, ok, err := req.StreamLength(); err != nil {
+ return out, metadata, fmt.Errorf(
+ "failed getting length of request stream, %w", err)
+ } else if ok {
+ req.ContentLength = n
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate that the
+// content-length is valid (at least zero) for the serialized request payload.
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates the request
+// content-length is set to a value of at least zero.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+ return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the validateContentLength middleware.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the content-length of the serialized request
+// is set to a non-negative value.
+func (m *validateContentLength) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ // if request content-length was set to less than 0, return an error
+ if req.ContentLength < 0 {
+ return out, metadata, fmt.Errorf(
+ "content length for payload is required and must be at least 0")
+ }
+
+ return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
new file mode 100644
index 000000000..855c22720
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
@@ -0,0 +1,81 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// WithHeaderComment instruments a middleware stack to append an HTTP field
+// comment to the given header as specified in RFC 9110
+// (https://www.rfc-editor.org/rfc/rfc9110#name-comments).
+//
+// The header is case-insensitive. If the provided header exists when the
+// middleware runs, the content will be inserted as-is enclosed in parentheses.
+//
+// Note that per the HTTP specification, comments are only allowed in fields
+// containing "comment" as part of their field value definition, but this API
+// will NOT verify whether the provided header is one of them.
+//
+// WithHeaderComment MAY be applied more than once to a middleware stack and/or
+// more than once per header.
+func WithHeaderComment(header, content string) func(*middleware.Stack) error {
+ return func(s *middleware.Stack) error {
+ m, err := getOrAddHeaderComment(s)
+ if err != nil {
+ return fmt.Errorf("get or add header comment: %v", err)
+ }
+
+ m.values.Add(header, content)
+ return nil
+ }
+}
+
+type headerCommentMiddleware struct {
+ values http.Header // hijack case-insensitive access APIs
+}
+
+func (*headerCommentMiddleware) ID() string {
+ return "headerComment"
+}
+
+func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ r, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ for h, contents := range m.values {
+ for _, c := range contents {
+ if existing := r.Header.Get(h); existing != "" {
+ r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c))
+ }
+ }
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) {
+ id := (*headerCommentMiddleware)(nil).ID()
+ m, ok := s.Build.Get(id)
+ if !ok {
+ m := &headerCommentMiddleware{values: http.Header{}}
+ if err := s.Build.Add(m, middleware.After); err != nil {
+ return nil, fmt.Errorf("add build: %v", err)
+ }
+
+ return m, nil
+ }
+
+ hc, ok := m.(*headerCommentMiddleware)
+ if !ok {
+ return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id)
+ }
+
+ return hc, nil
+}
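+
+// Illustrative usage sketch (construction of the stack itself is elided):
+// append a comment to the User-Agent field.
+//
+//	if err := WithHeaderComment("User-Agent", "my-app/1.0")(stack); err != nil {
+//		// handle error
+//	}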
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
new file mode 100644
index 000000000..eac32b4ba
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
@@ -0,0 +1,167 @@
+package http
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type isContentTypeAutoSet struct{}
+
+// SetIsContentTypeDefaultValue returns a Context specifying if the request's
+// content-type header was set to a default value.
+func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context {
+ return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault)
+}
+
+// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the
+// request is a default value that was auto assigned by an operation
+// serializer. Allows middleware post serialization to know if the content-type
+// was auto set to a default value or not.
+//
+// Also returns false if the Context value was never updated to include if
+// content-type was set to a default value.
+func GetIsContentTypeDefaultValue(ctx context.Context) bool {
+ v, _ := ctx.Value(isContentTypeAutoSet{}).(bool)
+ return v
+}
+
+// AddNoPayloadDefaultContentTypeRemover adds the DefaultContentTypeRemover
+// middleware to the stack after the operation serializer. This middleware will
+// remove the content-type header from the request if it was set as a default
+// value, and no request payload is present.
+//
+// Returns error if unable to add the middleware.
+func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
+ err = stack.Serialize.Insert(removeDefaultContentType{},
+ "OperationSerializer", middleware.After)
+ if err != nil {
+ return fmt.Errorf("failed to add %s serialize middleware, %w",
+ removeDefaultContentType{}.ID(), err)
+ }
+
+ return nil
+}
+
+// RemoveNoPayloadDefaultContentTypeRemover removes the
+// DefaultContentTypeRemover middleware from the stack. Returns an error if
+// unable to remove the middleware.
+func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) {
+ _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID())
+ if err != nil {
+ return fmt.Errorf("failed to remove %s serialize middleware, %w",
+ removeDefaultContentType{}.ID(), err)
+
+ }
+ return nil
+}
+
+// removeDefaultContentType provides after serialization middleware that will
+// remove the content-type header from an HTTP request if the header was set as
+// a default value by the operation serializer, and there is no request payload.
+type removeDefaultContentType struct{}
+
+// ID returns the middleware ID
+func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" }
+
+// HandleSerialize implements the serialization middleware.
+func (removeDefaultContentType) HandleSerialize(
+ ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, meta middleware.Metadata, err error,
+) {
+ req, ok := input.Request.(*Request)
+ if !ok {
+ return out, meta, fmt.Errorf(
+ "unexpected request type %T for removeDefaultContentType middleware",
+ input.Request)
+ }
+
+ if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil {
+ req.Header.Del("Content-Type")
+ input.Request = req
+ }
+
+ return next.HandleSerialize(ctx, input)
+}
+
+type headerValue struct {
+ header string
+ value string
+ append bool
+}
+
+type headerValueHelper struct {
+ headerValues []headerValue
+}
+
+func (h *headerValueHelper) addHeaderValue(value headerValue) {
+ h.headerValues = append(h.headerValues, value)
+}
+
+func (h *headerValueHelper) ID() string {
+ return "HTTPHeaderHelper"
+}
+
+func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) {
+ req, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ for _, value := range h.headerValues {
+ if value.append {
+ req.Header.Add(value.header, value.value)
+ } else {
+ req.Header.Set(value.header, value.value)
+ }
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) {
+ id := (*headerValueHelper)(nil).ID()
+ m, ok := stack.Build.Get(id)
+ if !ok {
+ m = &headerValueHelper{}
+ err := stack.Build.Add(m, middleware.After)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ requestUserAgent, ok := m.(*headerValueHelper)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id)
+ }
+
+ return requestUserAgent, nil
+}
+
+// AddHeaderValue returns a stack mutator that adds the header value pair to header.
+// Appends to any existing values if present.
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ helper, err := getOrAddHeaderValueHelper(stack)
+ if err != nil {
+ return err
+ }
+ helper.addHeaderValue(headerValue{header: header, value: value, append: true})
+ return nil
+ }
+}
+
+// SetHeaderValue returns a stack mutator that adds the header value pair to header.
+// Replaces any existing values if present.
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+ return func(stack *middleware.Stack) error {
+ helper, err := getOrAddHeaderValueHelper(stack)
+ if err != nil {
+ return err
+ }
+ helper.addHeaderValue(headerValue{header: header, value: value, append: false})
+ return nil
+ }
+}
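+
+// Illustrative usage sketch: both mutators apply to the same stack;
+// AddHeaderValue appends to existing values while SetHeaderValue replaces them.
+//
+//	_ = AddHeaderValue("X-Debug", "trace")(stack)
+//	_ = SetHeaderValue("Accept", "application/json")(stack)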
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
new file mode 100644
index 000000000..d5909b0a2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
@@ -0,0 +1,75 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "net/http/httputil"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally
+// their respective bodies. Will not perform any logging if none of the options are set.
+type RequestResponseLogger struct {
+ LogRequest bool
+ LogRequestWithBody bool
+
+ LogResponse bool
+ LogResponseWithBody bool
+}
+
+// ID is the middleware identifier.
+func (r *RequestResponseLogger) ID() string {
+ return "RequestResponseLogger"
+}
+
+// HandleDeserialize will log the request and response HTTP messages if configured accordingly.
+func (r *RequestResponseLogger) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ logger := middleware.GetLogger(ctx)
+
+ if r.LogRequest || r.LogRequestWithBody {
+ smithyRequest, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ rc := smithyRequest.Build(ctx)
+ reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ logger.Logf(logging.Debug, "Request\n%v", string(reqBytes))
+
+ if r.LogRequestWithBody {
+ smithyRequest, err = smithyRequest.SetStream(rc.Body)
+ if err != nil {
+ return out, metadata, err
+ }
+ in.Request = smithyRequest
+ }
+ }
+
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+
+ if (err == nil) && (r.LogResponse || r.LogResponseWithBody) {
+ smithyResponse, ok := out.RawResponse.(*Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
+ }
+
+ respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to dump response %w", err)
+ }
+
+ logger.Logf(logging.Debug, "Response\n%v", string(respBytes))
+ }
+
+ return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go
new file mode 100644
index 000000000..d6079b259
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go
@@ -0,0 +1,51 @@
+package http
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type (
+ hostnameImmutableKey struct{}
+ hostPrefixDisableKey struct{}
+)
+
+// GetHostnameImmutable retrieves whether the endpoint hostname should be considered
+// immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func GetHostnameImmutable(ctx context.Context) (v bool) {
+ v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool)
+ return v
+}
+
+// SetHostnameImmutable sets or modifies whether the request's endpoint hostname
+// should be considered immutable or not.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func SetHostnameImmutable(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value)
+}
+
+// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is
+// disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) {
+ v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool)
+ return v
+}
+
+// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host
+// prefixing should be disabled. If value is true, endpoint host prefixing
+// will be disabled.
+//
+// Scoped to stack values. Use middleware#ClearStackValues to clear all stack
+// values.
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
new file mode 100644
index 000000000..326cb8a6c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ "strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+ proto string
+ expectedProtoMajor int
+ expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+ return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+ m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major and minor version.
+type RequireMinimumProtocol struct {
+ ProtoMajor int
+ ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+ return stack.Deserialize.Insert(&RequireMinimumProtocol{
+ ProtoMajor: major,
+ ProtoMinor: minor,
+ }, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+ return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor
+// protocol version.
+func (r *RequireMinimumProtocol) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ response, ok := out.RawResponse.(*Response)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse)
+ }
+
+ if !strings.HasPrefix(response.Proto, "HTTP") {
+ return out, metadata, &MinimumProtocolError{
+ proto: response.Proto,
+ expectedProtoMajor: r.ProtoMajor,
+ expectedProtoMinor: r.ProtoMinor,
+ }
+ }
+
+ if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor {
+ return out, metadata, &MinimumProtocolError{
+ proto: response.Proto,
+ expectedProtoMajor: r.ProtoMajor,
+ expectedProtoMinor: r.ProtoMinor,
+ }
+ }
+
+ return out, metadata, err
+}
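+
+// Illustrative usage sketch: require at least HTTP/2.0 for an operation.
+//
+//	if err := AddRequireMinimumProtocol(stack, 2, 0); err != nil {
+//		// handle error
+//	}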
diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go
new file mode 100644
index 000000000..c65aa3932
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/properties.go
@@ -0,0 +1,80 @@
+package http
+
+import smithy "github.com/aws/smithy-go"
+
+type (
+ sigV4SigningNameKey struct{}
+ sigV4SigningRegionKey struct{}
+
+ sigV4ASigningNameKey struct{}
+ sigV4ASigningRegionsKey struct{}
+
+ isUnsignedPayloadKey struct{}
+ disableDoubleEncodingKey struct{}
+)
+
+// GetSigV4SigningName gets the signing name from Properties.
+func GetSigV4SigningName(p *smithy.Properties) (string, bool) {
+ v, ok := p.Get(sigV4SigningNameKey{}).(string)
+ return v, ok
+}
+
+// SetSigV4SigningName sets the signing name on Properties.
+func SetSigV4SigningName(p *smithy.Properties, name string) {
+ p.Set(sigV4SigningNameKey{}, name)
+}
+
+// GetSigV4SigningRegion gets the signing region from Properties.
+func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) {
+ v, ok := p.Get(sigV4SigningRegionKey{}).(string)
+ return v, ok
+}
+
+// SetSigV4SigningRegion sets the signing region on Properties.
+func SetSigV4SigningRegion(p *smithy.Properties, region string) {
+ p.Set(sigV4SigningRegionKey{}, region)
+}
+
+// GetSigV4ASigningName gets the v4a signing name from Properties.
+func GetSigV4ASigningName(p *smithy.Properties) (string, bool) {
+ v, ok := p.Get(sigV4ASigningNameKey{}).(string)
+ return v, ok
+}
+
+// SetSigV4ASigningName sets the signing name on Properties.
+func SetSigV4ASigningName(p *smithy.Properties, name string) {
+ p.Set(sigV4ASigningNameKey{}, name)
+}
+
+// GetSigV4ASigningRegions gets the v4a signing region set from Properties.
+func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) {
+ v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string)
+ return v, ok
+}
+
+// SetSigV4ASigningRegions sets the v4a signing region set on Properties.
+func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) {
+ p.Set(sigV4ASigningRegionsKey{}, regions)
+}
+
+// GetIsUnsignedPayload gets whether the payload is unsigned from Properties.
+func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) {
+ v, ok := p.Get(isUnsignedPayloadKey{}).(bool)
+ return v, ok
+}
+
+// SetIsUnsignedPayload sets whether the payload is unsigned on Properties.
+func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) {
+ p.Set(isUnsignedPayloadKey{}, isUnsignedPayload)
+}
+
+// GetDisableDoubleEncoding gets whether double encoding is disabled from Properties.
+func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) {
+ v, ok := p.Get(disableDoubleEncodingKey{}).(bool)
+ return v, ok
+}
+
+// SetDisableDoubleEncoding sets whether double encoding is disabled on Properties.
+func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) {
+ p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding)
+}
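+
+// Illustrative usage sketch: record SigV4 signing parameters on a Properties
+// value and read them back.
+//
+//	var p smithy.Properties
+//	SetSigV4SigningName(&p, "s3")
+//	SetSigV4SigningRegion(&p, "us-east-1")
+//	if name, ok := GetSigV4SigningName(&p); ok {
+//		_ = name // "s3"
+//	}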
diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go
new file mode 100644
index 000000000..5cbf6f10a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/request.go
@@ -0,0 +1,188 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ iointernal "github.com/aws/smithy-go/transport/http/internal/io"
+)
+
+// Request provides the HTTP specific request structure for HTTP specific
+// middleware steps to use to serialize input, and send an operation's request.
+type Request struct {
+ *http.Request
+ stream io.Reader
+ isStreamSeekable bool
+ streamStartPos int64
+}
+
+// NewStackRequest returns an initialized request ready to be populated with the
+// HTTP request details. Returns empty interface so the function can be used as
+// a parameter to the Smithy middleware Stack constructor.
+func NewStackRequest() interface{} {
+ return &Request{
+ Request: &http.Request{
+ URL: &url.URL{},
+ Header: http.Header{},
+ ContentLength: -1, // default to unknown length
+ },
+ }
+}
+
+// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set.
+func (r *Request) IsHTTPS() bool {
+ if r.URL == nil {
+ return false
+ }
+ return strings.EqualFold(r.URL.Scheme, "https")
+}
+
+// Clone returns a deep copy of the Request for the new context. A reference to
+// the Stream is copied, but the underlying stream is not copied.
+func (r *Request) Clone() *Request {
+ rc := *r
+ rc.Request = rc.Request.Clone(context.TODO())
+ return &rc
+}
+
+// StreamLength returns the number of bytes of the serialized stream attached
+// to the request, with ok set to true if the length could be determined. If
+// the length cannot be determined, an error will be returned.
+func (r *Request) StreamLength() (size int64, ok bool, err error) {
+ return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos)
+}
+
+func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) {
+ if stream == nil {
+ return 0, true, nil
+ }
+
+ if l, ok := stream.(interface{ Len() int }); ok {
+ return int64(l.Len()), true, nil
+ }
+
+ if !seekable {
+ return 0, false, nil
+ }
+
+ s := stream.(io.Seeker)
+ endOffset, err := s.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, false, err
+ }
+
+ // The reason to seek to streamStartPos instead of 0 is to ensure that the
+ // SDK only sends the stream from the starting position the user's
+ // application provided it to the SDK at. For example, an application opens
+ // a file and wants to skip the first N bytes, uploading the rest. The
+ // application would advance the file's offset by N bytes, then hand it off
+ // to the SDK to send the remainder. The SDK should respect that offset.
+ _, err = s.Seek(startPos, io.SeekStart)
+ if err != nil {
+ return 0, false, err
+ }
+
+ return endOffset - startPos, true, nil
+}
+
+// RewindStream will rewind the io.Reader to the relative start position if it
+// is an io.Seeker.
+func (r *Request) RewindStream() error {
+ // If there is no stream there is nothing to rewind.
+ if r.stream == nil {
+ return nil
+ }
+
+ if !r.isStreamSeekable {
+ return fmt.Errorf("request stream is not seekable")
+ }
+ _, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart)
+ return err
+}
+
+// GetStream returns the request stream io.Reader if a stream is set. If no
+// stream is present nil will be returned.
+func (r *Request) GetStream() io.Reader {
+ return r.stream
+}
+
+// IsStreamSeekable returns whether the stream is seekable.
+func (r *Request) IsStreamSeekable() bool {
+ return r.isStreamSeekable
+}
+
+// SetStream returns a clone of the request with the stream set to the provided
+// reader. May return an error if the provided reader is seekable but seeking
+// it fails.
+func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) {
+ rc = r.Clone()
+
+ if reader == http.NoBody {
+ reader = nil
+ }
+
+ var isStreamSeekable bool
+ var streamStartPos int64
+ switch v := reader.(type) {
+ case io.Seeker:
+ n, err := v.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return r, err
+ }
+ isStreamSeekable = true
+ streamStartPos = n
+ default:
+ // If the stream length can be determined, and is determined to be empty,
+ // use a nil stream to prevent confusion between empty vs not-empty
+ // streams.
+ length, ok, err := streamLength(reader, false, 0)
+ if err != nil {
+ return nil, err
+ } else if ok && length == 0 {
+ reader = nil
+ }
+ }
+
+ rc.stream = reader
+ rc.isStreamSeekable = isStreamSeekable
+ rc.streamStartPos = streamStartPos
+
+ return rc, err
+}
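+
+// Illustrative usage sketch: attach a seekable stream and inspect its length.
+//
+//	req := NewStackRequest().(*Request)
+//	req, err := req.SetStream(strings.NewReader("hello"))
+//	// on success: n, ok, _ := req.StreamLength() yields n == 5, ok == true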
+
+// Build returns a built standard HTTP request value from the Smithy request.
+// The request's stream is wrapped in a safe container that allows it to be
+// reused for subsequent attempts.
+func (r *Request) Build(ctx context.Context) *http.Request {
+ req := r.Request.Clone(ctx)
+
+ if r.stream == nil && req.ContentLength == -1 {
+ req.ContentLength = 0
+ }
+
+ switch stream := r.stream.(type) {
+ case *io.PipeReader:
+ req.Body = io.NopCloser(stream)
+ req.ContentLength = -1
+ default:
+ // HTTP Client Request must only have a non-nil body if the
+ // ContentLength is explicitly unknown (-1) or non-zero. The HTTP
+ // Client will interpret a non-nil body and ContentLength 0 as
+ // "unknown". This is unwanted behavior.
+ if req.ContentLength != 0 && r.stream != nil {
+ req.Body = iointernal.NewSafeReadCloser(io.NopCloser(stream))
+ }
+ }
+
+ return req
+}
+
+// RequestCloner is a function that can take an input request type and clone the request
+// for use in a subsequent retry attempt.
+func RequestCloner(v interface{}) interface{} {
+ return v.(*Request).Clone()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go
new file mode 100644
index 000000000..0c13bfcc8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/response.go
@@ -0,0 +1,34 @@
+package http
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// Response provides the HTTP specific response structure for HTTP specific
+// middleware steps to use to deserialize the response from an operation call.
+type Response struct {
+ *http.Response
+}
+
+// ResponseError provides the HTTP centric error type wrapping the underlying
+// error with the HTTP response value.
+type ResponseError struct {
+ Response *Response
+ Err error
+}
+
+// HTTPStatusCode returns the HTTP response status code received from the service.
+func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode }
+
+// HTTPResponse returns the HTTP response received from the service.
+func (e *ResponseError) HTTPResponse() *Response { return e.Response }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *ResponseError) Unwrap() error { return e.Err }
+
+func (e *ResponseError) Error() string {
+ return fmt.Sprintf(
+ "http response error StatusCode: %d, %v",
+ e.Response.StatusCode, e.Err)
+}
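+
+// Illustrative usage sketch: callers typically unwrap with errors.As.
+//
+//	var respErr *ResponseError
+//	if errors.As(err, &respErr) {
+//		_ = respErr.HTTPStatusCode()
+//	}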
diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go
new file mode 100644
index 000000000..607b196a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/time.go
@@ -0,0 +1,13 @@
+package http
+
+import (
+ "time"
+
+ smithytime "github.com/aws/smithy-go/time"
+)
+
+// ParseTime parses a time string like the HTTP Date header. This uses a more
+// relaxed rule set for date parsing compared to the standard library.
+func ParseTime(text string) (t time.Time, err error) {
+ return smithytime.ParseHTTPDate(text)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go
new file mode 100644
index 000000000..60a5fc100
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/url.go
@@ -0,0 +1,44 @@
+package http
+
+import "strings"
+
+// JoinPath returns an absolute URL path composed of the two paths provided.
+// Enforces that the returned path begins with '/'. If the added path is
+// empty, the returned path's suffix will match that of the first parameter.
+func JoinPath(a, b string) string {
+ if len(a) == 0 {
+ a = "/"
+ } else if a[0] != '/' {
+ a = "/" + a
+ }
+
+ if len(b) != 0 && b[0] == '/' {
+ b = b[1:]
+ }
+
+ if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' {
+ a = a + "/"
+ }
+
+ return a + b
+}
+
+// JoinRawQuery returns an absolute raw query expression. Any duplicate '&'
+// separators will be collapsed to a single separator between values.
+func JoinRawQuery(a, b string) string {
+ a = strings.TrimFunc(a, isAmpersand)
+ b = strings.TrimFunc(b, isAmpersand)
+
+ if len(a) == 0 {
+ return b
+ }
+ if len(b) == 0 {
+ return a
+ }
+
+ return a + "&" + b
+}
+
+func isAmpersand(v rune) bool {
+ return v == '&'
+}
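
A short sketch of the join behavior; the expected outputs in the comments follow directly from the normalization rules above.

```go
package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// Path join normalizes the leading '/' and the separator between parts.
	fmt.Println(smithyhttp.JoinPath("/v1", "items")) // /v1/items
	fmt.Println(smithyhttp.JoinPath("", "/items"))   // /items
	fmt.Println(smithyhttp.JoinPath("/v1/", ""))     // /v1/

	// Query join trims stray '&' and inserts a single separator.
	fmt.Println(smithyhttp.JoinRawQuery("a=1&", "&b=2")) // a=1&b=2
}
```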
diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
new file mode 100644
index 000000000..71a7e0d8a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
@@ -0,0 +1,37 @@
+package http
+
+import (
+ "strings"
+)
+
+// UserAgentBuilder is a builder for an HTTP User-Agent string.
+type UserAgentBuilder struct {
+ sb strings.Builder
+}
+
+// NewUserAgentBuilder returns a new UserAgentBuilder.
+func NewUserAgentBuilder() *UserAgentBuilder {
+ return &UserAgentBuilder{sb: strings.Builder{}}
+}
+
+// AddKey adds the named component/product to the agent string.
+func (u *UserAgentBuilder) AddKey(key string) {
+ u.appendTo(key)
+}
+
+// AddKeyValue adds the named key to the agent string with the given value.
+func (u *UserAgentBuilder) AddKeyValue(key, value string) {
+ u.appendTo(key + "/" + value)
+}
+
+// Build returns the constructed User-Agent string. May be called multiple times.
+func (u *UserAgentBuilder) Build() string {
+ return u.sb.String()
+}
+
+func (u *UserAgentBuilder) appendTo(value string) {
+ if u.sb.Len() > 0 {
+ u.sb.WriteRune(' ')
+ }
+ u.sb.WriteString(value)
+}
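
A usage sketch: keys and key/value pairs accumulate in order, separated by single spaces.

```go
package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	b := smithyhttp.NewUserAgentBuilder()
	b.AddKey("smithy-go")          // bare product token
	b.AddKeyValue("my-app", "1.2") // product/version pair
	fmt.Println(b.Build())         // "smithy-go my-app/1.2"
}
```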
diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go
new file mode 100644
index 000000000..b5eedc1f9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/validation.go
@@ -0,0 +1,140 @@
+package smithy
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// An InvalidParamsError wraps the invalid parameter errors found when
+// validating API operation input parameters.
+type InvalidParamsError struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []InvalidParamError
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *InvalidParamsError) Add(err InvalidParamError) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another InvalidParamsError
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validation errors.
+func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e *InvalidParamsError) Len() int {
+ return len(e.errs)
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e InvalidParamsError) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs))
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Error())
+ }
+
+ return w.String()
+}
+
+// Errs returns a slice of the invalid parameters
+func (e InvalidParamsError) Errs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An InvalidParamError represents an invalid parameter error type.
+type InvalidParamError interface {
+ error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type invalidParamError struct {
+ context string
+ nestedContext string
+ field string
+ reason string
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e invalidParamError) Error() string {
+ return fmt.Sprintf("%s, %s.", e.reason, e.Field())
+}
+
+// Field returns the field and the context in which the error occurred.
+func (e invalidParamError) Field() string {
+ sb := &strings.Builder{}
+ sb.WriteString(e.context)
+ if sb.Len() > 0 {
+ if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") {
+ sb.WriteRune('.')
+ }
+ }
+ if len(e.nestedContext) > 0 {
+ sb.WriteString(e.nestedContext)
+ sb.WriteRune('.')
+ }
+ sb.WriteString(e.field)
+ return sb.String()
+}
+
+// SetContext updates the base context of the error.
+func (e *invalidParamError) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *invalidParamError) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ return
+ }
+ // Check if our nested context is an index into a slice or map
+ if e.nestedContext[:1] != "[" {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ return
+ }
+ e.nestedContext = ctx + e.nestedContext
+}
+
+// A ParamRequiredError represents a required parameter error.
+type ParamRequiredError struct {
+ invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+ return &ParamRequiredError{
+ invalidParamError{
+ field: field,
+ reason: "missing required field",
+ },
+ }
+}
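
A sketch of how a generated validator typically drives these types; the input struct and its field names here are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/aws/smithy-go"
)

// CreateThingInput is a hypothetical operation input.
type CreateThingInput struct {
	Name *string
	Tag  *string
}

// validateInput collects every missing field before returning a single error,
// mirroring the shape of generated operation validators.
func validateInput(in *CreateThingInput) error {
	invalidParams := smithy.InvalidParamsError{Context: "CreateThingInput"}
	if in.Name == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Name"))
	}
	if in.Tag == nil {
		invalidParams.Add(smithy.NewErrParamRequired("Tag"))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	// Prints a summary followed by one line per missing field.
	fmt.Println(validateInput(&CreateThingInput{}))
}
```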
diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go
new file mode 100644
index 000000000..8d70a03ff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/logger.go
@@ -0,0 +1,36 @@
+package waiter
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// Logger is the Logger middleware used by the waiter to log an attempt.
+type Logger struct {
+ // Attempt is the current attempt to be logged
+ Attempt int64
+}
+
+// ID representing the Logger middleware
+func (*Logger) ID() string {
+ return "WaiterLogger"
+}
+
+// HandleInitialize handles the request during the initialize stack step.
+func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ logger := middleware.GetLogger(ctx)
+
+ logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt))
+
+ return next.HandleInitialize(ctx, in)
+}
+
+// AddLogger is a helper util that adds the waiter Logger to the stack after the `SetLogger` middleware.
+func (m Logger) AddLogger(stack *middleware.Stack) error {
+ return stack.Initialize.Insert(&m, "SetLogger", middleware.After)
+}
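
A wiring sketch, assuming smithy-go's `middleware.NewStack` and `middleware.AddSetLoggerMiddleware` helpers and the HTTP transport's request constructor.

```go
package main

import (
	"os"

	"github.com/aws/smithy-go/logging"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
	"github.com/aws/smithy-go/waiter"
)

func main() {
	stack := middleware.NewStack("waiterExample", smithyhttp.NewStackRequest)

	// The waiter Logger inserts itself after the SetLogger middleware, so
	// that middleware must already be present in the stack.
	if err := middleware.AddSetLoggerMiddleware(stack, logging.NewStandardLogger(os.Stderr)); err != nil {
		panic(err)
	}

	lm := waiter.Logger{Attempt: 3}
	if err := lm.AddLogger(stack); err != nil {
		panic(err)
	}
}
```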
diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go
new file mode 100644
index 000000000..03e46e2ee
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/waiter.go
@@ -0,0 +1,66 @@
+package waiter
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/aws/smithy-go/rand"
+)
+
+// ComputeDelay computes the delay between waiter attempts. It takes the
+// current attempt count, the minimum delay, the maximum delay, and the
+// remaining wait time as input. Both minDelay and maxDelay must be greater
+// than 0, and minDelay must be less than or equal to maxDelay.
+//
+// Returns the computed delay, or an error if it cannot be computed from the
+// given inputs. Note that the zeroth attempt results in no delay.
+func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) {
+ // zeroth attempt, no delay
+ if attempt <= 0 {
+ return 0, nil
+ }
+
+ // remainingTime is zero or less, no delay
+ if remainingTime <= 0 {
+ return 0, nil
+ }
+
+ // validate min delay is greater than 0
+ if minDelay == 0 {
+ return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay")
+ }
+
+ // validate max delay is greater than 0
+ if maxDelay == 0 {
+ return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay")
+ }
+
+ // Get attempt ceiling to prevent integer overflow.
+ attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1
+
+ if attempt > int64(attemptCeiling) {
+ delay = maxDelay
+ } else {
+ // Compute exponential delay based on attempt.
+ ri := 1 << uint64(attempt-1)
+ // compute delay
+ delay = minDelay * time.Duration(ri)
+ }
+
+ if delay != minDelay {
+ // randomize to get jitter between min delay and delay value
+ d, err := rand.CryptoRandInt63n(int64(delay - minDelay))
+ if err != nil {
+ return 0, fmt.Errorf("error computing retry jitter, %w", err)
+ }
+
+ delay = time.Duration(d) + minDelay
+ }
+
+ // check if this is the last attempt possible and compute delay accordingly
+ if remainingTime-delay <= minDelay {
+ delay = remainingTime - minDelay
+ }
+
+ return delay, nil
+}
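
A small driver showing the exponential growth with jitter; the bounds chosen here are arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/smithy-go/waiter"
)

func main() {
	minDelay := 2 * time.Second
	maxDelay := 120 * time.Second
	remaining := 5 * time.Minute

	// Attempt 0 yields no delay; attempt 1 yields exactly minDelay; later
	// attempts are jittered between minDelay and minDelay*2^(attempt-1).
	for attempt := int64(0); attempt <= 5; attempt++ {
		d, err := waiter.ComputeDelay(attempt, minDelay, maxDelay, remaining)
		if err != nil {
			panic(err)
		}
		fmt.Printf("attempt %d: sleep %v\n", attempt, d)
		remaining -= d
	}
}
```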
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
index 16abdfc08..9433004a2 100644
--- a/vendor/github.com/cenkalti/backoff/v4/README.md
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
@@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[travis]: https://travis-ci.org/cenkalti/backoff
-[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
index 2c56c1e71..aac99f196 100644
--- a/vendor/github.com/cenkalti/backoff/v4/exponential.go
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -71,6 +71,9 @@ type Clock interface {
Now() time.Time
}
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
// Default values for ExponentialBackOff.
const (
DefaultInitialInterval = 500 * time.Millisecond
@@ -81,7 +84,7 @@ const (
)
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
b := &ExponentialBackOff{
InitialInterval: DefaultInitialInterval,
RandomizationFactor: DefaultRandomizationFactor,
@@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff {
Stop: Stop,
Clock: SystemClock,
}
+ for _, fn := range opts {
+ fn(b)
+ }
b.Reset()
return b
}
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
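
A usage sketch of the new functional options; values here are arbitrary.

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(30*time.Second),
	)

	// NextBackOff returns backoff.Stop once MaxElapsedTime is exhausted.
	for i := 0; i < 3; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```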
+
type systemClock struct{}
func (t systemClock) Now() time.Time {
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
index b071cea51..a9e1b72ba 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -35,6 +35,7 @@ import (
"runtime"
"strings"
"sync"
+ "sync/atomic"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
"github.com/klauspost/compress/zstd"
@@ -42,6 +43,8 @@ import (
"golang.org/x/sync/errgroup"
)
+type GzipHelperFunc func(io.Reader) (io.ReadCloser, error)
+
type options struct {
chunkSize int
compressionLevel int
@@ -50,6 +53,7 @@ type options struct {
compression Compression
ctx context.Context
minChunkSize int
+ gzipHelperFunc GzipHelperFunc
}
type Option func(o *options) error
@@ -127,11 +131,25 @@ func WithMinChunkSize(minChunkSize int) Option {
}
}
+// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers.
+// When a gzip-compressed layer is detected, this function will be used instead of the
+// Go standard library gzip decompression for better performance.
+// The function should take an io.Reader as input and return an io.ReadCloser.
+// If nil, the Go standard library gzip.NewReader will be used.
+func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option {
+ return func(o *options) error {
+ o.gzipHelperFunc = gzipHelperFunc
+ return nil
+ }
+}
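
A sketch of plugging in a helper; the standard library reader shown is a stand-in with the right shape, where a real caller would supply a faster decompressor.

```go
package estargzexample

import (
	"compress/gzip"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// stdGzip satisfies estargz.GzipHelperFunc using the standard library.
func stdGzip(r io.Reader) (io.ReadCloser, error) {
	return gzip.NewReader(r)
}

// buildWithHelper builds an eStargz blob, decompressing any gzip input with
// the supplied helper instead of the default gzip.NewReader.
func buildWithHelper(tarBlob *io.SectionReader) (*estargz.Blob, error) {
	return estargz.Build(tarBlob, estargz.WithGzipHelperFunc(stdGzip))
}
```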
+
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
- diffID digest.Digester
- tocDigest digest.Digest
+ diffID digest.Digester
+ tocDigest digest.Digest
+ readCompleted *atomic.Bool
+ uncompressedSize *atomic.Int64
}
// DiffID returns the digest of uncompressed blob.
@@ -145,6 +163,19 @@ func (b *Blob) TOCDigest() digest.Digest {
return b.tocDigest
}
+// UncompressedSize returns the size of the uncompressed blob.
+// UncompressedSize should only be called after the blob has been fully read.
+func (b *Blob) UncompressedSize() (int64, error) {
+ switch {
+ case b.uncompressedSize == nil || b.readCompleted == nil:
+ return -1, fmt.Errorf("readCompleted or uncompressedSize is not initialized")
+ case !b.readCompleted.Load():
+ return -1, fmt.Errorf("called UncompressedSize before the blob has been fully read")
+ default:
+ return b.uncompressedSize.Load(), nil
+ }
+}
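
A minimal sketch of the read-then-measure contract: the size is counted while the blob streams, so the reader must be fully drained before calling UncompressedSize.

```go
package estargzexample

import (
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// uncompressedSizeOf builds an eStargz blob and reports its uncompressed size.
func uncompressedSizeOf(tarBlob *io.SectionReader) (int64, error) {
	blob, err := estargz.Build(tarBlob)
	if err != nil {
		return -1, err
	}
	defer blob.Close()

	// Drain the blob; UncompressedSize errors if called before a full read.
	if _, err := io.Copy(io.Discard, blob); err != nil {
		return -1, err
	}
	return blob.UncompressedSize()
}
```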
+
// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
// or plain tar) passed through the argument. If some prioritized files are listed in
// the option, these files are grouped as "prioritized" and can be used for runtime optimization
@@ -186,7 +217,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
}
}()
- tarBlob, err := decompressBlob(tarBlob, layerFiles)
+ tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc)
if err != nil {
return nil, err
}
@@ -252,17 +283,28 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
}
diffID := digest.Canonical.Digester()
pr, pw := io.Pipe()
+ readCompleted := new(atomic.Bool)
+ uncompressedSize := new(atomic.Int64)
go func() {
- r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+ var size int64
+ var decompressFunc func(io.Reader) (io.ReadCloser, error)
+ if _, ok := opts.compression.(*gzipCompression); ok && opts.gzipHelperFunc != nil {
+ decompressFunc = opts.gzipHelperFunc
+ } else {
+ decompressFunc = opts.compression.Reader
+ }
+ decompressR, err := decompressFunc(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
if err != nil {
pw.CloseWithError(err)
return
}
- defer r.Close()
- if _, err := io.Copy(diffID.Hash(), r); err != nil {
+ defer decompressR.Close()
+ if size, err = io.Copy(diffID.Hash(), decompressR); err != nil {
pw.CloseWithError(err)
return
}
+ uncompressedSize.Store(size)
+ readCompleted.Store(true)
pw.Close()
}()
return &Blob{
@@ -270,8 +312,10 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
Reader: pr,
closeFunc: layerFiles.CleanupAll,
},
- tocDigest: tocDgst,
- diffID: diffID,
+ tocDigest: tocDgst,
+ diffID: diffID,
+ readCompleted: readCompleted,
+ uncompressedSize: uncompressedSize,
}, nil
}
@@ -366,8 +410,9 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
// Sort the tar file respecting to the prioritized files list.
sorted := &tarFile{}
+ picked := make(map[string]struct{})
for _, l := range prioritized {
- if err := moveRec(l, intar, sorted); err != nil {
+ if err := moveRec(l, intar, sorted, picked); err != nil {
if errors.Is(err, errNotFound) && missedPrioritized != nil {
*missedPrioritized = append(*missedPrioritized, l)
continue // allow not found
@@ -395,8 +440,8 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
})
}
- // Dump all entry and concatinate them.
- return append(sorted.dump(), intar.dump()...), nil
+ // Dump prioritized entries followed by the rest entries while skipping picked ones.
+ return append(sorted.dump(nil), intar.dump(picked)...), nil
}
// readerFromEntries returns a reader of tar archive that contains entries passed
@@ -408,11 +453,11 @@ func readerFromEntries(entries ...*entry) io.Reader {
defer tw.Close()
for _, entry := range entries {
if err := tw.WriteHeader(entry.header); err != nil {
- pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+ pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err))
return
}
if _, err := io.Copy(tw, entry.payload); err != nil {
- pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+ pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err))
return
}
}
@@ -436,9 +481,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
if err != nil {
if err == io.EOF {
break
- } else {
- return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
+ return nil, fmt.Errorf("failed to parse tar file, %w", err)
}
switch cleanEntryName(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:
@@ -459,36 +503,42 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
return tf, nil
}
-func moveRec(name string, in *tarFile, out *tarFile) error {
+func moveRec(name string, in *tarFile, out *tarFile, picked map[string]struct{}) error {
name = cleanEntryName(name)
if name == "" { // root directory. stop recursion.
if e, ok := in.get(name); ok {
// entry of the root directory exists. we should move it as well.
// this case will occur if tar entries are prefixed with "./", "/", etc.
- out.add(e)
- in.remove(name)
+ if _, done := picked[name]; !done {
+ out.add(e)
+ picked[name] = struct{}{}
+ }
}
return nil
}
_, okIn := in.get(name)
_, okOut := out.get(name)
- if !okIn && !okOut {
+ _, okPicked := picked[name]
+ if !okIn && !okOut && !okPicked {
return fmt.Errorf("file: %q: %w", name, errNotFound)
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
- if err := moveRec(parent, in, out); err != nil {
+ if err := moveRec(parent, in, out, picked); err != nil {
return err
}
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
- if err := moveRec(e.header.Linkname, in, out); err != nil {
+ if err := moveRec(e.header.Linkname, in, out, picked); err != nil {
return err
}
}
+ if _, done := picked[name]; done {
+ return nil
+ }
if e, ok := in.get(name); ok {
out.add(e)
- in.remove(name)
+ picked[name] = struct{}{}
}
return nil
}
@@ -534,8 +584,18 @@ func (f *tarFile) get(name string) (e *entry, ok bool) {
return
}
-func (f *tarFile) dump() []*entry {
- return f.stream
+func (f *tarFile) dump(skip map[string]struct{}) []*entry {
+ if len(skip) == 0 {
+ return f.stream
+ }
+ var out []*entry
+ for _, e := range f.stream {
+ if _, ok := skip[cleanEntryName(e.header.Name)]; ok {
+ continue
+ }
+ out = append(out, e)
+ }
+ return out
}
type readCloser struct {
@@ -628,12 +688,12 @@ func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
switch whence {
default:
- return 0, fmt.Errorf("Unknown whence: %v", whence)
+ return 0, fmt.Errorf("unknown whence: %v", whence)
case io.SeekStart:
case io.SeekCurrent:
offset += *cr.cPos
case io.SeekEnd:
- return 0, fmt.Errorf("Unsupported whence: %v", whence)
+ return 0, fmt.Errorf("unsupported whence: %v", whence)
}
if offset < 0 {
@@ -650,7 +710,7 @@ func (cr *countReadSeeker) currentPos() int64 {
return *cr.cPos
}
-func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
+func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) {
if org.Size() < 4 {
return org, nil
}
@@ -661,7 +721,13 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, e
var dR io.Reader
if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
// gzip
- dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ var dgR io.ReadCloser
+ var err error
+ if gzipHelperFunc != nil {
+ dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size()))
+ } else {
+ dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+ }
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
index f4d554655..ff91a37ad 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -307,6 +307,15 @@ func (r *Reader) initFields() error {
}
}
+ if len(r.m) == 0 {
+ r.m[""] = &TOCEntry{
+ Name: "",
+ Type: "dir",
+ Mode: 0755,
+ NumLink: 1,
+ }
+ }
+
return nil
}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
index f24afe32f..88fa13b19 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -109,7 +109,7 @@ func gzipFooterBytes(tocOff int64) []byte {
header[0], header[1] = 'S', 'G'
subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
- gz.Header.Extra = append(header, []byte(subfield)...)
+ gz.Extra = append(header, []byte(subfield)...)
gz.Close()
if buf.Len() != FooterSize {
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
@@ -136,7 +136,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t
return 0, 0, 0, err
}
defer zr.Close()
- extra := zr.Header.Extra
+ extra := zr.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
@@ -181,7 +181,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
}
defer zr.Close()
- extra := zr.Header.Extra
+ extra := zr.Extra
if len(extra) != 16+len("STARGZ") {
return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
index 0ca6fd75f..ff165e090 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -26,18 +26,18 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
+ "crypto/rand"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
- "math/rand"
+ "math/big"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
- "testing"
"time"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
@@ -45,23 +45,51 @@ import (
digest "github.com/opencontainers/go-digest"
)
-func init() {
- rand.Seed(time.Now().UnixNano())
-}
-
// TestingController is Compression with some helper methods necessary for testing.
type TestingController interface {
Compression
- TestStreams(t *testing.T, b []byte, streams []int64)
- DiffIDOf(*testing.T, []byte) string
+ TestStreams(t TestingT, b []byte, streams []int64)
+ DiffIDOf(TestingT, []byte) string
String() string
}
+// TestingT is the minimal subset of testing.T required to run the
+// tests defined in CompressionTestSuite. This interface exists to avoid
+// exposing the testing package outside of tests.
+type TestingT interface {
+ Errorf(format string, args ...any)
+ FailNow()
+ Failed() bool
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Logf(format string, args ...any)
+ Parallel()
+}
+
+// Runner allows running subtests of TestingT. This exists instead of adding
+// a Run method to the TestingT interface because the Run implementation of
+// testing.T would not satisfy the interface.
+type Runner func(t TestingT, name string, fn func(t TestingT))
+
+type TestRunner struct {
+ TestingT
+ Runner Runner
+}
+
+func (r *TestRunner) Run(name string, run func(*TestRunner)) {
+ r.Runner(r.TestingT, name, func(t TestingT) {
+ run(&TestRunner{TestingT: t, Runner: r.Runner})
+ })
+}
+
+// CompressionTestSuite tests that this package, using the given controllers, can build valid eStargz blobs and parse them.
-func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
- t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
- t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
- t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) {
+ t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) })
+ t.Run("testDigestAndVerify", func(t *TestRunner) {
+ t.Parallel()
+ testDigestAndVerify(t, controllers...)
+ })
+ t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) })
}
type TestingControllerFactory func() TestingController
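
A sketch of adapting *testing.T to the new shim from an external test file; the controller factory is hypothetical and would be wired up by the real suite.

```go
package estargz_test

import (
	"testing"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// newTestController is a hypothetical TestingControllerFactory provided
// elsewhere in the real test suite.
var newTestController estargz.TestingControllerFactory

func TestCompression(t *testing.T) {
	runner := &estargz.TestRunner{
		TestingT: t,
		Runner: func(tt estargz.TestingT, name string, fn func(estargz.TestingT)) {
			// The concrete value is a *testing.T here, so its native Run
			// can drive the subtest.
			tt.(*testing.T).Run(name, func(st *testing.T) { fn(st) })
		},
	}
	estargz.CompressionTestSuite(runner, newTestController)
}
```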
@@ -82,7 +110,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}
// testBuild tests the resulting stargz blob built by this pkg has the same
// contents as the normal stargz blob.
-func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+func testBuild(t *TestRunner, controllers ...TestingControllerFactory) {
tests := []struct {
name string
chunkSize int
@@ -168,7 +196,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
prefix := prefix
for _, minChunkSize := range tt.minChunkSize {
minChunkSize := minChunkSize
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) {
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
// Test divideEntries()
entries, err := sortEntries(tarBlob, nil, nil) // identical order
@@ -268,7 +296,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
}
}
-func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aGz, err := cla.Reader(bytes.NewReader(a))
if err != nil {
t.Fatalf("failed to read A")
@@ -328,7 +356,7 @@ func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingContr
return true
}
-func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
if err != nil {
t.Fatalf("failed to parse A: %v", err)
@@ -342,7 +370,7 @@ func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingCon
return aJTOC.Version == bJTOC.Version
}
-func isSameEntries(t *testing.T, a, b *Reader) bool {
+func isSameEntries(t TestingT, a, b *Reader) bool {
aroot, ok := a.Lookup("")
if !ok {
t.Fatalf("failed to get root of A")
@@ -356,18 +384,19 @@ func isSameEntries(t *testing.T, a, b *Reader) bool {
return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
}
-func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader {
buf := new(bytes.Buffer)
var w io.WriteCloser
var err error
- if srcCompression == gzipType {
+ switch srcCompression {
+ case gzipType:
w = gzip.NewWriter(buf)
- } else if srcCompression == zstdType {
+ case zstdType:
w, err = zstd.NewWriter(buf)
if err != nil {
t.Fatalf("failed to init zstd writer: %v", err)
}
- } else {
+ default:
return src
}
src.Seek(0, io.SeekStart)
@@ -389,7 +418,7 @@ type stargzEntry struct {
// contains checks if all child entries in "b" are also contained in "a".
// This function also checks if the files/chunks contain the same contents among "a" and "b".
-func contains(t *testing.T, a, b stargzEntry) bool {
+func contains(t TestingT, a, b stargzEntry) bool {
ae, ar := a.e, a.r
be, br := b.e, b.r
t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
@@ -448,7 +477,7 @@ func contains(t *testing.T, a, b stargzEntry) bool {
bbytes, bnext, bok := readOffset(t, bf, nr, b)
if !aok && !bok {
break
- } else if !(aok && bok) || anext != bnext {
+ } else if !aok || !bok || anext != bnext {
t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
ae.Name, be.Name, nr, aok, bok, anext, bnext)
return false
@@ -500,7 +529,7 @@ func equalEntry(a, b *TOCEntry) bool {
a.Digest == b.Digest
}
-func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
+func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
if !ok {
return nil, 0, false
@@ -519,7 +548,7 @@ func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry)
return data[:n], offset + ce.ChunkSize, true
}
-func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
+func dumpTOCJSON(t TestingT, tocJSON *JTOC) string {
jtocData, err := json.Marshal(*tocJSON)
if err != nil {
t.Fatalf("failed to marshal TOC JSON: %v", err)
@@ -533,20 +562,19 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
const chunkSize = 3
-// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
-type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
+type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
// testDigestAndVerify runs specified checks against sample stargz blobs.
-func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
+func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) {
tests := []struct {
name string
- tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
+ tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry)
checks []check
minChunkSize []int
}{
{
name: "no-regfile",
- tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
dir("test/"),
)
@@ -561,7 +589,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
},
{
name: "small-files",
- tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
@@ -585,7 +613,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
},
{
name: "big-files",
- tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
@@ -609,7 +637,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
{
name: "with-non-regfiles",
minChunkSize: []int{0, 64000},
- tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+ tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
return tarOf(
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
regDigest(t, "foo.txt", "a", dgstMap),
@@ -656,7 +684,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
srcTarFormat := srcTarFormat
for _, minChunkSize := range tt.minChunkSize {
minChunkSize := minChunkSize
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) {
// Get original tar file and chunk digests
dgstMap := make(map[string]digest.Digest)
tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
@@ -692,7 +720,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
// checkStargzTOC checks the TOC JSON of the passed stargz has the expected
// digest and contains valid chunks. It walks all entries in the stargz and
// checks all chunk digests stored to the TOC JSON match the actual contents.
-func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller),
@@ -803,7 +831,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
// checkVerifyTOC checks the verification works for the TOC JSON of the passed
// stargz. It walks all entries in the stargz and checks the verifications for
// all chunks work.
-func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
WithDecompressors(controller),
@@ -884,9 +912,9 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
// detected during the verification and the verification returns an error.
func checkVerifyInvalidTOCEntryFail(filename string) check {
- return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
funcs := map[string]rewriteFunc{
- "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ "lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
var found bool
for _, e := range toc.Entries {
if cleanEntryName(e.Name) == filename {
@@ -904,7 +932,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
t.Fatalf("rewrite target not found")
}
},
- "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ "duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
var (
sampleEntry *TOCEntry
targetEntry *TOCEntry
@@ -920,16 +948,18 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
}
if sampleEntry == nil {
t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ return
}
if targetEntry == nil {
t.Fatalf("rewrite target not found")
+ return
}
targetEntry.Offset = sampleEntry.Offset
},
}
for name, rFunc := range funcs {
- t.Run(name, func(t *testing.T) {
+ t.Run(name, func(t *TestRunner) {
newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
buf := new(bytes.Buffer)
if _, err := io.Copy(buf, newSgz); err != nil {
@@ -958,7 +988,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
// checkVerifyInvalidStargzFail checks if the verification detects that the
// given stargz file doesn't match to the expected digest and returns error.
func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
- return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
cl := newController()
rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
if err != nil {
@@ -990,7 +1020,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
// checkVerifyBrokenContentFail checks if the verifier detects broken contents
// that doesn't match to the expected digest and returns error.
func checkVerifyBrokenContentFail(filename string) check {
- return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+ return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
// Parse stargz file
sgz, err := Open(
io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@@ -1047,9 +1077,9 @@ func chunkID(name string, offset, size int64) string {
return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
}
-type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader)
-func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
if err != nil {
t.Fatalf("failed to extract TOC JSON: %v", err)
@@ -1120,7 +1150,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
return decodedJTOC, tocOffset, nil
}
-func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
+func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) {
const content = "Some contents"
invalidUtf8 := "\xff\xfe\xfd"
@@ -1464,7 +1494,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
srcTarFormat := srcTarFormat
for _, lossless := range []bool{true, false} {
- t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) {
var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
origTarDgstr := digest.Canonical.Digester()
tr = io.TeeReader(tr, origTarDgstr.Hash())
@@ -1530,6 +1560,9 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
if err != nil {
t.Fatalf("stargz.Open: %v", err)
}
+ if _, ok := r.Lookup(""); !ok {
+ t.Fatalf("failed to lookup rootdir: %v", err)
+ }
wantTOCVersion := 1
if tt.wantTOCVersion > 0 {
wantTOCVersion = tt.wantTOCVersion
@@ -1628,7 +1661,7 @@ func digestFor(content string) string {
type numTOCEntries int
-func (n numTOCEntries) check(t *testing.T, r *Reader) {
+func (n numTOCEntries) check(t TestingT, r *Reader) {
if r.toc == nil {
t.Fatal("nil TOC")
}
@@ -1648,15 +1681,15 @@ func (n numTOCEntries) check(t *testing.T, r *Reader) {
func checks(s ...stargzCheck) []stargzCheck { return s }
type stargzCheck interface {
- check(t *testing.T, r *Reader)
+ check(t TestingT, r *Reader)
}
-type stargzCheckFn func(*testing.T, *Reader)
+type stargzCheckFn func(TestingT, *Reader)
-func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
+func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) }
func maxDepth(max int) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
e, ok := r.Lookup("")
if !ok {
t.Fatal("root directory not found")
@@ -1673,7 +1706,7 @@ func maxDepth(max int) stargzCheck {
})
}
-func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
+func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) {
if current > limit {
return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
current, limit)
@@ -1695,7 +1728,7 @@ func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr e
}
func hasFileLen(file string, wantLen int) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == file {
if ent.Type != "reg" {
@@ -1711,7 +1744,7 @@ func hasFileLen(file string, wantLen int) stargzCheck {
}
func hasFileXattrs(file, name, value string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == file {
if ent.Type != "reg" {
@@ -1738,7 +1771,7 @@ func hasFileXattrs(file, name, value string) stargzCheck {
}
func hasFileDigest(file string, digest string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
ent, ok := r.Lookup(file)
if !ok {
t.Fatalf("didn't find TOCEntry for file %q", file)
@@ -1750,7 +1783,7 @@ func hasFileDigest(file string, digest string) stargzCheck {
}
func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
extraMap := make(map[string]chunkInfo)
for _, e := range extra {
extraMap[e.name] = e
@@ -1797,7 +1830,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c
}
func hasFileContentsRange(file string, offset int, want string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
f, err := r.OpenFile(file)
if err != nil {
t.Fatal(err)
@@ -1814,7 +1847,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
}
func hasChunkEntries(file string, wantChunks int) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
ent, ok := r.Lookup(file)
if !ok {
t.Fatalf("no file for %q", file)
@@ -1858,7 +1891,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck {
}
func entryHasChildren(dir string, want ...string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
want := append([]string(nil), want...)
var got []string
ent, ok := r.Lookup(dir)
@@ -1877,7 +1910,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck {
}
func hasDir(file string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == cleanEntryName(file) {
if ent.Type != "dir" {
@@ -1891,7 +1924,7 @@ func hasDir(file string) stargzCheck {
}
func hasDirLinkCount(file string, count int) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == cleanEntryName(file) {
if ent.Type != "dir" {
@@ -1909,7 +1942,7 @@ func hasDirLinkCount(file string, count int) stargzCheck {
}
func hasMode(file string, mode os.FileMode) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == cleanEntryName(file) {
if ent.Stat().Mode() != mode {
@@ -1924,7 +1957,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck {
}
func hasSymlink(file, target string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
for _, ent := range r.toc.Entries {
if ent.Name == file {
if ent.Type != "symlink" {
@@ -1940,7 +1973,7 @@ func hasSymlink(file, target string) stargzCheck {
}
func lookupMatch(name string, want *TOCEntry) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
e, ok := r.Lookup(name)
if !ok {
t.Fatalf("failed to Lookup entry %q", name)
@@ -1953,7 +1986,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck {
}
func hasEntryOwner(entry string, owner owner) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
if !ok {
t.Errorf("entry %q not found", entry)
@@ -1967,7 +2000,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
}
func mustSameEntry(files ...string) stargzCheck {
- return stargzCheckFn(func(t *testing.T, r *Reader) {
+ return stargzCheckFn(func(t TestingT, r *Reader) {
var first *TOCEntry
for _, f := range files {
if first == nil {
@@ -2039,7 +2072,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format
return f(tw, prefix, format)
}
-func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
format := tar.FormatUnknown
for _, opt := range opts {
switch v := opt.(type) {
@@ -2248,7 +2281,7 @@ func noPrefetchLandmark() tarEntry {
})
}
-func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
+func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
if digestMap == nil {
t.Fatalf("digest map mustn't be nil")
}
@@ -2291,7 +2324,11 @@ var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX
func randomContents(n int) string {
b := make([]rune, n)
for i := range b {
- b[i] = runes[rand.Intn(len(runes))]
+ bi, err := rand.Int(rand.Reader, big.NewInt(int64(len(runes))))
+ if err != nil {
+ panic(err)
+ }
+ b[i] = runes[int(bi.Int64())]
}
return string(b)
}
@@ -2314,7 +2351,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() }
func (f fileInfoOnlyMode) Sys() interface{} { return nil }
-func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) {
if len(streams) == 0 {
return // nop
}
@@ -2343,8 +2380,8 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
t.Fatalf("countStreams(gzip), Copy: %v", err)
}
var extra string
- if len(zr.Header.Extra) > 0 {
- extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+ if len(zr.Extra) > 0 {
+ extra = fmt.Sprintf("; extra=%q", zr.Extra)
}
t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
delete(wants, int64(zoff))
@@ -2352,7 +2389,7 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
}
}
-func GzipDiffIDOf(t *testing.T, b []byte) string {
+func GzipDiffIDOf(t TestingT, b []byte) string {
h := sha256.New()
zr, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go
new file mode 100644
index 000000000..0ec4b12c7
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/debug.go
@@ -0,0 +1,62 @@
+package md2man
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/russross/blackfriday/v2"
+)
+
+func fmtListFlags(flags blackfriday.ListType) string {
+ knownFlags := []struct {
+ name string
+ flag blackfriday.ListType
+ }{
+ {"ListTypeOrdered", blackfriday.ListTypeOrdered},
+ {"ListTypeDefinition", blackfriday.ListTypeDefinition},
+ {"ListTypeTerm", blackfriday.ListTypeTerm},
+ {"ListItemContainsBlock", blackfriday.ListItemContainsBlock},
+ {"ListItemBeginningOfList", blackfriday.ListItemBeginningOfList},
+ {"ListItemEndOfList", blackfriday.ListItemEndOfList},
+ }
+
+ var f []string
+ for _, kf := range knownFlags {
+ if flags&kf.flag != 0 {
+ f = append(f, kf.name)
+ flags &^= kf.flag
+ }
+ }
+ if flags != 0 {
+ f = append(f, fmt.Sprintf("Unknown(%#x)", flags))
+ }
+ return strings.Join(f, "|")
+}
+
+type debugDecorator struct {
+ blackfriday.Renderer
+}
+
+func depth(node *blackfriday.Node) int {
+ d := 0
+ for n := node.Parent; n != nil; n = n.Parent {
+ d++
+ }
+ return d
+}
+
+func (d *debugDecorator) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ fmt.Fprintf(os.Stderr, "%s%s %v %v\n",
+ strings.Repeat(" ", depth(node)),
+ map[bool]string{true: "+", false: "-"}[entering],
+ node,
+ fmtListFlags(node.ListFlags))
+ var b strings.Builder
+ status := d.Renderer.RenderNode(io.MultiWriter(&b, w), node, entering)
+ if b.Len() > 0 {
+ fmt.Fprintf(os.Stderr, ">> %q\n", b.String())
+ }
+ return status
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
index 42bf32aab..5673f5c0b 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go
@@ -1,16 +1,24 @@
+// Package md2man aims in converting markdown into roff (man pages).
package md2man
import (
+ "os"
+ "strconv"
+
"github.com/russross/blackfriday/v2"
)
// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
renderer := NewRoffRenderer()
+ var r blackfriday.Renderer = renderer
+ if v, _ := strconv.ParseBool(os.Getenv("MD2MAN_DEBUG")); v {
+ r = &debugDecorator{Renderer: r}
+ }
return blackfriday.Run(doc,
[]blackfriday.Option{
- blackfriday.WithRenderer(renderer),
+ blackfriday.WithRenderer(r),
blackfriday.WithExtensions(renderer.GetExtensions()),
}...)
}
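
A short sketch of exercising the new debug hook; the sample markdown is arbitrary and includes a NAME section so the whatis-style formatting path is hit too.

```go
package main

import (
	"fmt"
	"os"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// Any value strconv.ParseBool accepts as true enables the decorator,
	// which traces every AST node to stderr while still rendering roff.
	os.Setenv("MD2MAN_DEBUG", "1")

	src := []byte("# md2man 1 \"Jan 2024\"\n\n# NAME\n\nmd2man - convert markdown to roff\n")
	fmt.Printf("%s", md2man.Render(src))
}
```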
diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
index 4b19188d9..4f1070fc5 100644
--- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
+++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go
@@ -1,6 +1,7 @@
package md2man
import (
+ "bufio"
"bytes"
"fmt"
"io"
@@ -13,68 +14,72 @@ import (
// roffRenderer implements the blackfriday.Renderer interface for creating
// roff format (manpages) from markdown text
type roffRenderer struct {
- extensions blackfriday.Extensions
listCounters []int
firstHeader bool
- firstDD bool
listDepth int
}
const (
- titleHeader = ".TH "
- topLevelHeader = "\n\n.SH "
- secondLevelHdr = "\n.SH "
- otherHeader = "\n.SS "
- crTag = "\n"
- emphTag = "\\fI"
- emphCloseTag = "\\fP"
- strongTag = "\\fB"
- strongCloseTag = "\\fP"
- breakTag = "\n.br\n"
- paraTag = "\n.PP\n"
- hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
- linkTag = "\n\\[la]"
- linkCloseTag = "\\[ra]"
- codespanTag = "\\fB"
- codespanCloseTag = "\\fR"
- codeTag = "\n.EX\n"
- codeCloseTag = "\n.EE\n"
- quoteTag = "\n.PP\n.RS\n"
- quoteCloseTag = "\n.RE\n"
- listTag = "\n.RS\n"
- listCloseTag = "\n.RE\n"
- dtTag = "\n.TP\n"
- dd2Tag = "\n"
- tableStart = "\n.TS\nallbox;\n"
- tableEnd = ".TE\n"
- tableCellStart = "T{\n"
- tableCellEnd = "\nT}\n"
+ titleHeader = ".TH "
+ topLevelHeader = "\n\n.SH "
+ secondLevelHdr = "\n.SH "
+ otherHeader = "\n.SS "
+ crTag = "\n"
+ emphTag = "\\fI"
+ emphCloseTag = "\\fP"
+ strongTag = "\\fB"
+ strongCloseTag = "\\fP"
+ breakTag = "\n.br\n"
+ paraTag = "\n.PP\n"
+ hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
+ linkTag = "\n\\[la]"
+ linkCloseTag = "\\[ra]"
+ codespanTag = "\\fB"
+ codespanCloseTag = "\\fR"
+ codeTag = "\n.EX\n"
+ codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, already include a trailing newline (or at least that is how blackfriday hands them to us).
+ quoteTag = "\n.PP\n.RS\n"
+ quoteCloseTag = "\n.RE\n"
+ listTag = "\n.RS\n"
+ listCloseTag = ".RE\n"
+ dtTag = "\n.TP\n"
+ dd2Tag = "\n"
+ tableStart = "\n.TS\nallbox;\n"
+ tableEnd = ".TE\n"
+ tableCellStart = "T{\n"
+ tableCellEnd = "\nT}"
+ tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
-func NewRoffRenderer() *roffRenderer { // nolint: golint
- var extensions blackfriday.Extensions
-
- extensions |= blackfriday.NoIntraEmphasis
- extensions |= blackfriday.Tables
- extensions |= blackfriday.FencedCode
- extensions |= blackfriday.SpaceHeadings
- extensions |= blackfriday.Footnotes
- extensions |= blackfriday.Titleblock
- extensions |= blackfriday.DefinitionLists
- return &roffRenderer{
- extensions: extensions,
- }
+func NewRoffRenderer() *roffRenderer {
+ return &roffRenderer{}
}
// GetExtensions returns the list of extensions used by this renderer implementation
-func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
- return r.extensions
+func (*roffRenderer) GetExtensions() blackfriday.Extensions {
+ return blackfriday.NoIntraEmphasis |
+ blackfriday.Tables |
+ blackfriday.FencedCode |
+ blackfriday.SpaceHeadings |
+ blackfriday.Footnotes |
+ blackfriday.Titleblock |
+ blackfriday.DefinitionLists
}
// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+ // We need to walk the tree to check if there are any tables.
+ // If there are, we need to enable the roff table preprocessor.
+ ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+ if node.Type == blackfriday.Table {
+ out(w, tablePreprocessor+"\n")
+ return blackfriday.Terminate
+ }
+ return blackfriday.GoToNext
+ })
+
// disable hyphenation
out(w, ".nh\n")
}
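
For context on the hunk above: when the walked AST contains a table, `RenderHeader` now emits the `'\" t` preprocessor line before anything else, which tells man(1) to run the page through tbl(1) first. A minimal sketch of how a caller would observe this through the package's public `Render` function (the title-block text is a made-up example):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	// A markdown document containing a table.
	doc := []byte("# demo 1 \"January 2024\" \"demo manual\"\n\n| A | B |\n|---|---|\n| 1 | 2 |\n")

	out := md2man.Render(doc)

	// With the change above, the rendered roff begins with the tbl(1)
	// preprocessor line instead of the bare ".nh" directive.
	fmt.Println(strings.HasPrefix(string(out), `'\" t`)) // true
}
```
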
@@ -91,7 +96,23 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
switch node.Type {
case blackfriday.Text:
- escapeSpecialChars(w, node.Literal)
+ // Special case: format the NAME section as required for proper whatis parsing.
+ // Refer to the lexgrog(1) and groff_man(7) manual pages for details.
+ if node.Parent != nil &&
+ node.Parent.Type == blackfriday.Paragraph &&
+ node.Parent.Prev != nil &&
+ node.Parent.Prev.Type == blackfriday.Heading &&
+ node.Parent.Prev.FirstChild != nil &&
+ bytes.EqualFold(node.Parent.Prev.FirstChild.Literal, []byte("NAME")) {
+ before, after, found := bytesCut(node.Literal, []byte(" - "))
+ escapeSpecialChars(w, before)
+ if found {
+ out(w, ` \- `)
+ escapeSpecialChars(w, after)
+ }
+ } else {
+ escapeSpecialChars(w, node.Literal)
+ }
case blackfriday.Softbreak:
out(w, crTag)
case blackfriday.Hardbreak:
@@ -129,14 +150,25 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering
case blackfriday.Document:
break
case blackfriday.Paragraph:
- // roff .PP markers break lists
- if r.listDepth > 0 {
- return blackfriday.GoToNext
- }
if entering {
- out(w, paraTag)
+ if r.listDepth > 0 {
+ // roff .PP markers break lists
+ if node.Prev != nil { // continued paragraph
+ if node.Prev.Type == blackfriday.List && node.Prev.ListFlags&blackfriday.ListTypeDefinition == 0 {
+ out(w, ".IP\n")
+ } else {
+ out(w, crTag)
+ }
+ }
+ } else if node.Prev != nil && node.Prev.Type == blackfriday.Heading {
+ out(w, crTag)
+ } else {
+ out(w, paraTag)
+ }
} else {
- out(w, crTag)
+ if node.Next == nil || node.Next.Type != blackfriday.List {
+ out(w, crTag)
+ }
}
case blackfriday.BlockQuote:
if entering {
@@ -199,6 +231,10 @@ func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, enteri
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
openTag := listTag
closeTag := listCloseTag
+ if (entering && r.listDepth == 0) || (!entering && r.listDepth == 1) {
+ openTag = crTag
+ closeTag = ""
+ }
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// tags for definition lists handled within Item node
openTag = ""
@@ -227,23 +263,25 @@ func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
// DT (definition term): line just before DD (see below).
out(w, dtTag)
- r.firstDD = true
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// DD (definition description): line that starts with ": ".
//
// We have to distinguish between the first DD and the
// subsequent ones, as there should be no vertical
// whitespace between the DT and the first DD.
- if r.firstDD {
- r.firstDD = false
- } else {
- out(w, dd2Tag)
+ if node.Prev != nil && node.Prev.ListFlags&(blackfriday.ListTypeTerm|blackfriday.ListTypeDefinition) == blackfriday.ListTypeDefinition {
+ if node.Prev.Type == blackfriday.Item &&
+ node.Prev.LastChild != nil &&
+ node.Prev.LastChild.Type == blackfriday.List &&
+ node.Prev.LastChild.ListFlags&blackfriday.ListTypeDefinition == 0 {
+ out(w, ".IP\n")
+ } else {
+ out(w, dd2Tag)
+ }
}
} else {
out(w, ".IP \\(bu 2\n")
}
- } else {
- out(w, "\n")
}
}
@@ -278,9 +316,8 @@ func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, ente
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
- if node.Next == nil && end != tableCellEnd {
- // Last cell: need to carriage return if we are at the end of the
- // header row and content isn't wrapped in a "tablecell"
+ if node.Next == nil {
+ // Last cell: need to carriage return if we are at the end of the header row.
end += crTag
}
out(w, end)
@@ -318,10 +355,32 @@ func countColumns(node *blackfriday.Node) int {
}
func out(w io.Writer, output string) {
- io.WriteString(w, output) // nolint: errcheck
+ io.WriteString(w, output) //nolint:errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
+ scanner := bufio.NewScanner(bytes.NewReader(text))
+
+ // count the number of newlines in the text; we need this to
+ // avoid appending an extra newline after the last line
+ n := bytes.Count(text, []byte{'\n'})
+ idx := 0
+
+ for scanner.Scan() {
+ dt := scanner.Bytes()
+ if idx < n {
+ idx++
+ dt = append(dt, '\n')
+ }
+ escapeSpecialCharsLine(w, dt)
+ }
+
+ if err := scanner.Err(); err != nil {
+ panic(err)
+ }
+}
+
+func escapeSpecialCharsLine(w io.Writer, text []byte) {
for i := 0; i < len(text); i++ {
// escape initial apostrophe or period
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
@@ -335,7 +394,7 @@ func escapeSpecialChars(w io.Writer, text []byte) {
i++
}
if i > org {
- w.Write(text[org:i]) // nolint: errcheck
+ w.Write(text[org:i]) //nolint:errcheck
}
// escape a character
@@ -343,6 +402,15 @@ func escapeSpecialChars(w io.Writer, text []byte) {
break
}
- w.Write([]byte{'\\', text[i]}) // nolint: errcheck
+ w.Write([]byte{'\\', text[i]}) //nolint:errcheck
+ }
+}
+
+// bytesCut is a copy of [bytes.Cut] to provide compatibility with go1.17
+// and older. We can remove this once we drop support for go1.17 and older.
+func bytesCut(s, sep []byte) (before, after []byte, found bool) {
+ if i := bytes.Index(s, sep); i >= 0 {
+ return s[:i], s[i+len(sep):], true
}
+ return s, nil, false
}
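
Two details in this file are worth illustrating together: the NAME-section special case splits the first paragraph under a `NAME` heading on " - " and re-emits the separator as the escaped `\-` that lexgrog(1)/whatis(1) expect, and it does so via `bytesCut`, the `bytes.Cut` backport added at the bottom of the file. A standalone sketch of that transformation (the `main` wrapper is illustrative, not part of the vendored code):

```go
package main

import (
	"bytes"
	"fmt"
)

// bytesCut mirrors the backported helper above, which behaves exactly
// like bytes.Cut from go1.18+.
func bytesCut(s, sep []byte) (before, after []byte, found bool) {
	if i := bytes.Index(s, sep); i >= 0 {
		return s[:i], s[i+len(sep):], true
	}
	return s, nil, false
}

func main() {
	// The first paragraph under a NAME heading in a typical man page.
	line := []byte("md2man - converts markdown into roff (man pages)")

	before, after, found := bytesCut(line, []byte(" - "))
	if found {
		// roff wants an escaped dash between the page name and its
		// description so the NAME section parses for whatis(1).
		fmt.Printf("%s \\- %s\n", before, after)
	} else {
		fmt.Printf("%s\n", before)
	}
	// Output: md2man \- converts markdown into roff (man pages)
}
```
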
diff --git a/vendor/github.com/distribution/reference/README.md b/vendor/github.com/distribution/reference/README.md
index e2531e49c..172a02e0b 100644
--- a/vendor/github.com/distribution/reference/README.md
+++ b/vendor/github.com/distribution/reference/README.md
@@ -10,7 +10,7 @@ Go library to handle references to container images.
[](https://codecov.io/gh/distribution/reference)
[](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fdistribution%2Freference?ref=badge_shield)
-This repository contains a library for handling refrences to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.
+This repository contains a library for handling references to container images held in container registries. Please see [godoc](https://pkg.go.dev/github.com/distribution/reference) for details.
## Contribution
diff --git a/vendor/github.com/distribution/reference/normalize.go b/vendor/github.com/distribution/reference/normalize.go
index a30229d01..f4128314c 100644
--- a/vendor/github.com/distribution/reference/normalize.go
+++ b/vendor/github.com/distribution/reference/normalize.go
@@ -123,20 +123,51 @@ func ParseDockerRef(ref string) (Named, error) {
// splitDockerDomain splits a repository name to domain and remote-name.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
-func splitDockerDomain(name string) (domain, remainder string) {
- i := strings.IndexRune(name, '/')
- if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != localhost && strings.ToLower(name[:i]) == name[:i]) {
- domain, remainder = defaultDomain, name
- } else {
- domain, remainder = name[:i], name[i+1:]
- }
- if domain == legacyDefaultDomain {
- domain = defaultDomain
- }
- if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
- remainder = officialRepoPrefix + remainder
- }
- return
+func splitDockerDomain(name string) (domain, remoteName string) {
+ maybeDomain, maybeRemoteName, ok := strings.Cut(name, "/")
+ if !ok {
+ // Fast-path for single element ("familiar" names), such as "ubuntu"
+ // or "ubuntu:latest". Familiar names must be handled separately, to
+ // prevent them from being mistaken for a "hostname:port" pair.
+ //
+ // Canonicalize them as "docker.io/library/name[:tag]"
+
+ // FIXME(thaJeztah): account for bare "localhost" or "example.com" names, which SHOULD be considered a domain.
+ return defaultDomain, officialRepoPrefix + name
+ }
+
+ switch {
+ case maybeDomain == localhost:
+ // localhost is a reserved namespace and always considered a domain.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case maybeDomain == legacyDefaultDomain:
+ // canonicalize the Docker Hub and legacy "Docker Index" domains.
+ domain, remoteName = defaultDomain, maybeRemoteName
+ case strings.ContainsAny(maybeDomain, ".:"):
+ // Likely a domain or IP-address:
+ //
+ // - contains a "." (e.g., "example.com" or "127.0.0.1")
+ // - contains a ":" (e.g., "example:5000", "::1", or "[::1]:5000")
+ domain, remoteName = maybeDomain, maybeRemoteName
+ case strings.ToLower(maybeDomain) != maybeDomain:
+ // Uppercase namespaces are not allowed, so if the first element
+ // is not lowercase, we assume it to be a domain-name.
+ domain, remoteName = maybeDomain, maybeRemoteName
+ default:
+ // None of the above: it's not a domain, so use the default, and
+ // use the full name input as the remote-name.
+ domain, remoteName = defaultDomain, name
+ }
+
+ if domain == defaultDomain && !strings.ContainsRune(remoteName, '/') {
+ // Canonicalize "familiar" names, but only on Docker Hub, not
+ // on other domains:
+ //
+ // "docker.io/ubuntu[:tag]" => "docker.io/library/ubuntu[:tag]"
+ remoteName = officialRepoPrefix + remoteName
+ }
+
+ return domain, remoteName
}
// familiarizeName returns a shortened version of the name familiar
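
The rewritten `splitDockerDomain` is exercised indirectly through the package's public helpers. A small sketch of the canonicalization rules encoded in the switch above, with the results each case should produce (per that logic) noted in comments:

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	for _, name := range []string{
		"ubuntu",                 // familiar name   -> docker.io, library/ubuntu
		"index.docker.io/ubuntu", // legacy domain   -> docker.io, library/ubuntu
		"localhost/foo",          // reserved domain -> localhost, foo
		"example.com:5000/app",   // host:port       -> example.com:5000, app
	} {
		named, err := reference.ParseNormalizedNamed(name)
		if err != nil {
			fmt.Println(name, "->", err)
			continue
		}
		fmt.Printf("%-24s -> domain=%s path=%s\n",
			name, reference.Domain(named), reference.Path(named))
	}
}
```
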
diff --git a/vendor/github.com/distribution/reference/reference.go b/vendor/github.com/distribution/reference/reference.go
index e98c44daa..900398bde 100644
--- a/vendor/github.com/distribution/reference/reference.go
+++ b/vendor/github.com/distribution/reference/reference.go
@@ -35,8 +35,13 @@ import (
)
const (
+ // RepositoryNameTotalLengthMax is the maximum total number of characters in a repository name.
+ RepositoryNameTotalLengthMax = 255
+
// NameTotalLengthMax is the maximum total number of characters in a repository name.
- NameTotalLengthMax = 255
+ //
+ // Deprecated: use [RepositoryNameTotalLengthMax] instead.
+ NameTotalLengthMax = RepositoryNameTotalLengthMax
)
var (
@@ -55,8 +60,8 @@ var (
// ErrNameEmpty is returned for empty, invalid repository names.
ErrNameEmpty = errors.New("repository name must have at least one component")
- // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
- ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+ // ErrNameTooLong is returned when a repository name is longer than RepositoryNameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax)
// ErrNameNotCanonical is returned when a name is not canonical.
ErrNameNotCanonical = errors.New("repository name must be canonical")
@@ -165,6 +170,9 @@ func Path(named Named) (name string) {
return path
}
+// splitDomain splits a named reference into a hostname and path string.
+// If no valid hostname is found, the hostname is empty and the full value
+// is returned as the name.
func splitDomain(name string) (string, string) {
match := anchoredNameRegexp.FindStringSubmatch(name)
if len(match) != 3 {
@@ -173,19 +181,6 @@ func splitDomain(name string) (string, string) {
return match[1], match[2]
}
-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-//
-// Deprecated: Use [Domain] or [Path].
-func SplitHostname(named Named) (string, string) {
- if r, ok := named.(namedRepository); ok {
- return r.Domain(), r.Path()
- }
- return splitDomain(named.Name())
-}
-
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
func Parse(s string) (Reference, error) {
@@ -200,10 +195,6 @@ func Parse(s string) (Reference, error) {
return nil, ErrReferenceInvalidFormat
}
- if len(matches[1]) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
var repo repository
nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
@@ -215,6 +206,10 @@ func Parse(s string) (Reference, error) {
repo.path = matches[1]
}
+ if len(repo.path) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
ref := reference{
namedRepository: repo,
tag: matches[2],
@@ -253,14 +248,15 @@ func ParseNamed(s string) (Named, error) {
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
- if len(name) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
match := anchoredNameRegexp.FindStringSubmatch(name)
if match == nil || len(match) != 3 {
return nil, ErrReferenceInvalidFormat
}
+
+ if len(match[2]) > RepositoryNameTotalLengthMax {
+ return nil, ErrNameTooLong
+ }
+
return repository{
domain: match[1],
path: match[2],
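
Net effect of the reference.go changes: `SplitHostname` is gone (callers should use `Domain` and `Path`), and the 255-character cap is now measured against the repository path alone rather than the whole input string, so a long registry domain or tag no longer eats into the budget. A sketch of the new boundary behavior (the registry host is a made-up example):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	// A 250-character path behind a 26-character domain prefix: the full
	// string exceeds 255 characters, but the path does not, so this now
	// parses where the old whole-string check would have rejected it.
	named, err := reference.WithName("registry.example.com:5000/" + strings.Repeat("a", 250))
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.Domain(named), len(reference.Path(named))) // registry.example.com:5000 250

	// Paths beyond RepositoryNameTotalLengthMax are still rejected.
	_, err = reference.WithName("registry.example.com:5000/" + strings.Repeat("a", 256))
	fmt.Println(err) // repository name must not be more than 255 characters
}
```
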
diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS
index 483743c99..57af08b20 100644
--- a/vendor/github.com/docker/cli/AUTHORS
+++ b/vendor/github.com/docker/cli/AUTHORS
@@ -2,6 +2,7 @@
# This file lists all contributors to the repository.
# See scripts/docs/generate-authors.sh to make modifications.
+A. Lester Buck III
Aanand Prasad
Aaron L. Xu
Aaron Lehmann
@@ -16,6 +17,7 @@ Adolfo Ochagavía
Adrian Plata
Adrien Duermael
Adrien Folie
+Adyanth Hosavalike
Ahmet Alp Balkan
Aidan Feldman
Aidan Hobson Sayers
@@ -24,9 +26,10 @@ Akhil Mohan
Akihiro Suda
Akim Demaille
Alan Thompson
+Alano Terblanche
Albert Callarisa
Alberto Roura
-Albin Kerouanton
+Albin Kerouanton
Aleksa Sarai
Aleksander Piotrowski
Alessandro Boch
@@ -34,6 +37,7 @@ Alex Couture-Beil
Alex Mavrogiannis
Alex Mayer
Alexander Boyd
+Alexander Chneerov
Alexander Larsson
Alexander Morozov
Alexander Ryabov
@@ -41,8 +45,10 @@ Alexandre González
Alexey Igrychev
Alexis Couvreur
Alfred Landrum
+Ali Rostami
Alicia Lauerman
Allen Sun
+Allie Sadler
Alvin Deng
Amen Belayneh
Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com>
@@ -57,10 +63,12 @@ Andreas Köhler
Andres G. Aragoneses
Andres Leon Rangel
Andrew France
+Andrew He
Andrew Hsu
Andrew Macpherson
Andrew McDonnell
Andrew Po
+Andrew-Zipperer
Andrey Petrov
Andrii Berehuliak
André Martins
@@ -75,11 +83,16 @@ Antonis Kalipetis
Anusha Ragunathan
Ao Li
Arash Deshmeh
+Archimedes Trajano
Arko Dasgupta
Arnaud Porterie
Arnaud Rebillout
+Arthur Flageul
Arthur Peka
+Ashly Mathew
Ashwini Oruganti
+Aslam Ahemad
+Austin Vazquez
Azat Khuyiyakhmetov
Bardia Keyoumarsi
Barnaby Gray
@@ -98,7 +111,9 @@ Bill Wang
Bin Liu
Bingshen Wang
Bishal Das
+Bjorn Neergaard
Boaz Shuster
+Boban Acimovic
Bogdan Anton
Boris Pruessmann
Brad Baker
@@ -109,19 +124,25 @@ Brent Salisbury
Bret Fisher
Brian (bex) Exelbierd
Brian Goff
+Brian Tracy
Brian Wieder
Bruno Sousa
Bryan Bess
Bryan Boreham
Bryan Murphy
bryfry
+Calvin Liu
Cameron Spear
Cao Weiwei
Carlo Mion
Carlos Alexandro Becker
Carlos de Paula
+carsontham
+Carston Schilds
+Casey Korver
Ce Gao
Cedric Davies
+Cesar Talledo
Cezar Sa Espinola
Chad Faragher
Chao Wang
@@ -136,6 +157,7 @@ Chen Chuanliang
Chen Hanxiao
Chen Mingjie
Chen Qiu
+Chris Chinchilla
Chris Couzens
Chris Gavin
Chris Gibson
@@ -150,6 +172,8 @@ Christophe Vidal
Christopher Biscardi
Christopher Crone
Christopher Jones
+Christopher Petito <47751006+krissetto@users.noreply.github.com>
+Christopher Petito
Christopher Svensson
Christy Norman
Chun Chen
@@ -163,6 +187,8 @@ Conner Crosby
Corey Farrell
Corey Quon
Cory Bennet
+Cory Snider
+Craig Osterhout
Craig Wilhite
Cristian Staretu
Daehyeok Mun
@@ -171,6 +197,8 @@ Daisuke Ito
dalanlan
Damien Nadé
Dan Cotora
+Dan Wallis
+Danial Gharib
Daniel Artine
Daniel Cassidy
Daniel Dao
@@ -196,9 +224,10 @@ David Alvarez
David Beitey
David Calavera
David Cramer
-David Dooling
+David Dooling
David Gageot
David Karlsson
+David le Blanc
David Lechner
David Scott
David Sheets
@@ -210,12 +239,14 @@ Denis Defreyne
Denis Gladkikh
Denis Ollier
Dennis Docter
+dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Derek McGowan
Des Preston
Deshi Xiao
Dharmit Shah
Dhawal Yogesh Bhanushali
Dieter Reuter
+Dilep Dev <34891655+DilepDev@users.noreply.github.com>
Dima Stopel
Dimitry Andric
Ding Fei
@@ -232,11 +263,14 @@ DongGeon Lee
Doug Davis
Drew Erny
Ed Costello
+Ed Morley <501702+edmorley@users.noreply.github.com>
Elango Sivanandam
Eli Uriegas
Eli Uriegas
Elias Faxö
Elliot Luo <956941328@qq.com>
+Eng Zer Jun
+Eric Bode
Eric Curtin
Eric Engestrom
Eric G. Noriega
@@ -254,6 +288,7 @@ Eugene Yakubovich
Evan Allrich
Evan Hazlett
Evan Krall
+Evan Lezar
Evelyn Xu
Everett Toews
Fabio Falci
@@ -275,19 +310,25 @@ Frederik Nordahl Jul Sabroe
Frieder Bluemle
Gabriel Gore
Gabriel Nicolas Avellaneda
+Gabriela Georgieva
Gaetan de Villele
Gang Qiao
Gary Schaetz
Genki Takiuchi
George MacRorie
+George Margaritis
George Xie
Gianluca Borello
+Giau. Tran Minh
+Giedrius Jonikas
Gildas Cuisinier
Gio d'Amelio
Gleb Stsenov
Goksu Toprak
Gou Rao
Govind Rai
+Grace Choi
+Graeme Wiebe
Grant Reaber
Greg Pflaum
Gsealy